xref: /linux/drivers/misc/sgi-xp/xpc_main.c (revision be54f8c558027a218423134dd9b8c7c46d92204a)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
7  * Copyright (c) 2004-2009 Silicon Graphics, Inc.  All Rights Reserved.
8  */
9 
10 /*
11  * Cross Partition Communication (XPC) support - standard version.
12  *
13  *	XPC provides a message passing capability that crosses partition
14  *	boundaries. This module is made up of two parts:
15  *
16  *	    partition	This part detects the presence/absence of other
17  *			partitions. It provides a heartbeat and monitors
18  *			the heartbeats of other partitions.
19  *
20  *	    channel	This part manages the channels and sends/receives
21  *			messages across them to/from other partitions.
22  *
23  *	There are a couple of additional functions residing in XP, which
24  *	provide an interface to XPC for its users.
25  *
26  *
27  *	Caveats:
28  *
29  *	  . Currently on sn2, we have no way to determine which nasid an IRQ
30  *	    came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
31  *	    followed by an IPI. The amo indicates where data is to be pulled
32  *	    from, so after the IPI arrives, the remote partition checks the amo
33  *	    word. The IPI can actually arrive before the amo however, so other
34  *	    code must periodically check for this case. Also, remote amo
35  *	    operations do not reliably time out. Thus we do a remote PIO read
36  *	    solely to know whether the remote partition is down and whether we
37  *	    should stop sending IPIs to it. This remote PIO read operation is
38  *	    set up in a special nofault region so SAL knows to ignore (and
39  *	    cleanup) any errors due to the remote amo write, PIO read, and/or
40  *	    PIO write operations.
41  *
42  *	    If/when new hardware solves this IPI problem, we should abandon
43  *	    the current approach.
44  *
45  */
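
/*
 * Orientation note (added commentary, not from the original header): users
 * never call into this file directly.  xpc_init() below registers XPC's
 * entry points with the XP layer via xpc_set_interface()
 * (xpc_initiate_connect(), xpc_initiate_send(), etc.), and XP forwards its
 * callers' requests to whichever interface is currently registered.
 */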
46 
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/sysctl.h>
50 #include <linux/device.h>
51 #include <linux/delay.h>
52 #include <linux/reboot.h>
53 #include <linux/kdebug.h>
54 #include <linux/kthread.h>
55 #include "xpc.h"
56 
57 #ifdef CONFIG_X86_64
58 #include <asm/traps.h>
59 #endif
60 
61 /* define two XPC debug device structures to be used with dev_dbg() et al */
62 
63 static struct device_driver xpc_dbg_name = {
64 	.name = "xpc"
65 };
66 
67 static struct device xpc_part_dbg_subname = {
68 	.init_name = "",	/* set to "part" at xpc_init() time */
69 	.driver = &xpc_dbg_name
70 };
71 
72 static struct device xpc_chan_dbg_subname = {
73 	.init_name = "",	/* set to "chan" at xpc_init() time */
74 	.driver = &xpc_dbg_name
75 };
76 
77 struct device *xpc_part = &xpc_part_dbg_subname;
78 struct device *xpc_chan = &xpc_chan_dbg_subname;
79 
80 static int xpc_kdebug_ignore;
81 
82 /* systune related variables for /proc/sys directories */
83 
84 static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
85 static int xpc_hb_min_interval = 1;
86 static int xpc_hb_max_interval = 10;
87 
88 static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
89 static int xpc_hb_check_min_interval = 10;
90 static int xpc_hb_check_max_interval = 120;
91 
92 int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
93 static int xpc_disengage_min_timelimit;	/* = 0 */
94 static int xpc_disengage_max_timelimit = 120;
95 
96 static const struct ctl_table xpc_sys_xpc_hb[] = {
97 	{
98 	 .procname = "hb_interval",
99 	 .data = &xpc_hb_interval,
100 	 .maxlen = sizeof(int),
101 	 .mode = 0644,
102 	 .proc_handler = proc_dointvec_minmax,
103 	 .extra1 = &xpc_hb_min_interval,
104 	 .extra2 = &xpc_hb_max_interval},
105 	{
106 	 .procname = "hb_check_interval",
107 	 .data = &xpc_hb_check_interval,
108 	 .maxlen = sizeof(int),
109 	 .mode = 0644,
110 	 .proc_handler = proc_dointvec_minmax,
111 	 .extra1 = &xpc_hb_check_min_interval,
112 	 .extra2 = &xpc_hb_check_max_interval},
113 };
114 static const struct ctl_table xpc_sys_xpc[] = {
115 	{
116 	 .procname = "disengage_timelimit",
117 	 .data = &xpc_disengage_timelimit,
118 	 .maxlen = sizeof(int),
119 	 .mode = 0644,
120 	 .proc_handler = proc_dointvec_minmax,
121 	 .extra1 = &xpc_disengage_min_timelimit,
122 	 .extra2 = &xpc_disengage_max_timelimit},
123 };
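
/*
 * Tuning sketch (added commentary; paths assume the register_sysctl() calls
 * in xpc_init() succeed): the tables above surface as
 * /proc/sys/xpc/hb/hb_interval, /proc/sys/xpc/hb/hb_check_interval and
 * /proc/sys/xpc/disengage_timelimit.  proc_dointvec_minmax() rejects writes
 * outside the [extra1, extra2] bounds, e.g.:
 *
 *	echo 5 > /proc/sys/xpc/hb/hb_interval		# accepted (1..10)
 *	echo 600 > /proc/sys/xpc/disengage_timelimit	# rejected (max 120)
 */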
124 
125 static struct ctl_table_header *xpc_sysctl;
126 static struct ctl_table_header *xpc_sysctl_hb;
127 
128 /* non-zero if any remote partition disengage timed out */
129 int xpc_disengage_timedout;
130 
131 /* #of activate IRQs received and not yet processed */
132 int xpc_activate_IRQ_rcvd;
133 DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);
134 
135 /* IRQ handler notifies this wait queue on receipt of an IRQ */
136 DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);
137 
138 static unsigned long xpc_hb_check_timeout;
139 static struct timer_list xpc_hb_timer;
140 
141 /* notification that the xpc_hb_checker thread has exited */
142 static DECLARE_COMPLETION(xpc_hb_checker_exited);
143 
144 /* notification that the xpc_discovery thread has exited */
145 static DECLARE_COMPLETION(xpc_discovery_exited);
146 
147 static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
148 
149 static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
150 static struct notifier_block xpc_reboot_notifier = {
151 	.notifier_call = xpc_system_reboot,
152 };
153 
154 static int xpc_system_die(struct notifier_block *, unsigned long, void *);
155 static struct notifier_block xpc_die_notifier = {
156 	.notifier_call = xpc_system_die,
157 };
158 
159 struct xpc_arch_operations xpc_arch_ops;
160 
161 /*
162  * Timer function to enforce the timelimit on the partition disengage.
163  */
164 static void
165 xpc_timeout_partition_disengage(struct timer_list *t)
166 {
167 	struct xpc_partition *part = timer_container_of(part, t,
168 							disengage_timer);
169 
170 	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));
171 
172 	xpc_partition_disengaged_from_timer(part);
173 
174 	DBUG_ON(part->disengage_timeout != 0);
175 	DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
176 }
177 
178 /*
179  * Timer to produce the heartbeat.  The timer structure's function is
180  * already set when this is initially called.  A tunable is used to
181  * specify when the next timeout should occur.
182  */
183 static void
184 xpc_hb_beater(struct timer_list *unused)
185 {
186 	xpc_arch_ops.increment_heartbeat();
187 
188 	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
189 		wake_up_interruptible(&xpc_activate_IRQ_wq);
190 
191 	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
192 	add_timer(&xpc_hb_timer);
193 }
194 
195 static void
196 xpc_start_hb_beater(void)
197 {
198 	xpc_arch_ops.heartbeat_init();
199 	timer_setup(&xpc_hb_timer, xpc_hb_beater, 0);
200 	xpc_hb_beater(NULL);
201 }
202 
203 static void
204 xpc_stop_hb_beater(void)
205 {
206 	timer_delete_sync(&xpc_hb_timer);
207 	xpc_arch_ops.heartbeat_exit();
208 }
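
/*
 * Timing note (an observation from the tunables near the top of this file):
 * xpc_hb_interval (1..10 seconds) controls how often the local heartbeat is
 * incremented by xpc_hb_beater(), while xpc_hb_check_interval (10..120
 * seconds) controls how often xpc_check_remote_hb() inspects remote
 * heartbeats, so a live peer should have incremented its count at least once
 * between consecutive checks.
 */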
209 
210 /*
211  * At periodic intervals, scan through all active partitions and ensure
212  * their heartbeat is still active.  If not, the partition is deactivated.
213  */
214 static void
215 xpc_check_remote_hb(void)
216 {
217 	struct xpc_partition *part;
218 	short partid;
219 	enum xp_retval ret;
220 
221 	for (partid = 0; partid < xp_max_npartitions; partid++) {
222 
223 		if (xpc_exiting)
224 			break;
225 
226 		if (partid == xp_partition_id)
227 			continue;
228 
229 		part = &xpc_partitions[partid];
230 
231 		if (part->act_state == XPC_P_AS_INACTIVE ||
232 		    part->act_state == XPC_P_AS_DEACTIVATING) {
233 			continue;
234 		}
235 
236 		ret = xpc_arch_ops.get_remote_heartbeat(part);
237 		if (ret != xpSuccess)
238 			XPC_DEACTIVATE_PARTITION(part, ret);
239 	}
240 }
241 
242 /*
243  * This thread is responsible for nearly all of the partition
244  * activation/deactivation.
245  */
246 static int
247 xpc_hb_checker(void *ignore)
248 {
249 	int force_IRQ = 0;
250 
251 	/* this thread was marked active by xpc_hb_init() */
252 
253 	set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));
254 
255 	/* set our heartbeating to other partitions into motion */
256 	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
257 	xpc_start_hb_beater();
258 
259 	while (!xpc_exiting) {
260 
261 		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
262 			"been received\n",
263 			(int)(xpc_hb_check_timeout - jiffies),
264 			xpc_activate_IRQ_rcvd);
265 
266 		/* checking of remote heartbeats is skewed by IRQ handling */
267 		if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
268 			xpc_hb_check_timeout = jiffies +
269 			    (xpc_hb_check_interval * HZ);
270 
271 			dev_dbg(xpc_part, "checking remote heartbeats\n");
272 			xpc_check_remote_hb();
273 		}
274 
275 		/* check for outstanding IRQs */
276 		if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) {
277 			force_IRQ = 0;
278 			dev_dbg(xpc_part, "processing activate IRQs "
279 				"received\n");
280 			xpc_arch_ops.process_activate_IRQ_rcvd();
281 		}
282 
283 		/* wait for IRQ or timeout */
284 		(void)wait_event_interruptible(xpc_activate_IRQ_wq,
285 					       (time_is_before_eq_jiffies(
286 						xpc_hb_check_timeout) ||
287 						xpc_activate_IRQ_rcvd > 0 ||
288 						xpc_exiting));
289 	}
290 
291 	xpc_stop_hb_beater();
292 
293 	dev_dbg(xpc_part, "heartbeat checker is exiting\n");
294 
295 	/* mark this thread as having exited */
296 	complete(&xpc_hb_checker_exited);
297 	return 0;
298 }
299 
300 /*
301  * This thread will attempt to discover other partitions to activate
302  * based on info provided by SAL. This new thread is short lived and
303  * will exit once discovery is complete.
304  */
305 static int
306 xpc_initiate_discovery(void *ignore)
307 {
308 	xpc_discovery();
309 
310 	dev_dbg(xpc_part, "discovery thread is exiting\n");
311 
312 	/* mark this thread as having exited */
313 	complete(&xpc_discovery_exited);
314 	return 0;
315 }
316 
317 /*
318  * The first kthread assigned to a newly activated partition is the one
319  * created by XPC HB to call xpc_activating(). XPC hangs on to
320  * that kthread until the partition is brought down, at which time that kthread
321  * returns to XPC HB. (The return of that kthread will signify to XPC HB
322  * that XPC has dismantled all communication infrastructure for the associated
323  * partition.) This kthread becomes the channel manager for that partition.
324  *
325  * Each active partition has a channel manager, who, besides connecting and
326  * disconnecting channels, will ensure that each of the partition's connected
327  * channels has the required number of assigned kthreads to get the work done.
328  */
329 static void
330 xpc_channel_mgr(struct xpc_partition *part)
331 {
332 	while (part->act_state != XPC_P_AS_DEACTIVATING ||
333 	       atomic_read(&part->nchannels_active) > 0 ||
334 	       !xpc_partition_disengaged(part)) {
335 
336 		xpc_process_sent_chctl_flags(part);
337 
338 		/*
339 		 * Wait until we've been requested to activate kthreads or
340 		 * all of the channel's message queues have been torn down or
341 		 * a signal is pending.
342 		 *
343 		 * The channel_mgr_requests is set to 1 after being awakened.
344 		 * This is done to prevent the channel mgr from making one pass
345 		 * through the loop for each request, since he will
346 		 * be servicing all the requests in one pass. The reason it's
347 		 * set to 1 instead of 0 is so that other kthreads will know
348 		 * that the channel mgr is running and won't bother trying to
349 		 * wake him up.
350 		 */
351 		atomic_dec(&part->channel_mgr_requests);
352 		(void)wait_event_interruptible(part->channel_mgr_wq,
353 				(atomic_read(&part->channel_mgr_requests) > 0 ||
354 				 part->chctl.all_flags != 0 ||
355 				 (part->act_state == XPC_P_AS_DEACTIVATING &&
356 				 atomic_read(&part->nchannels_active) == 0 &&
357 				 xpc_partition_disengaged(part))));
358 		atomic_set(&part->channel_mgr_requests, 1);
359 	}
360 }
361 
362 /*
363  * Guarantee that the kzalloc'd memory is cacheline aligned.
364  */
365 void *
366 xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
367 {
368 	/* see if kzalloc will give us cacheline-aligned memory by default */
369 	*base = kzalloc(size, flags);
370 	if (*base == NULL)
371 		return NULL;
372 
373 	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
374 		return *base;
375 
376 	kfree(*base);
377 
378 	/* nope, we'll have to do it ourselves */
379 	*base = kzalloc(size + L1_CACHE_BYTES, flags);
380 	if (*base == NULL)
381 		return NULL;
382 
383 	return (void *)L1_CACHE_ALIGN((u64)*base);
384 }
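
/*
 * Usage sketch (illustrative, not part of the original source): callers keep
 * the raw pointer returned through *base for the eventual kfree() and use
 * the return value for cacheline-aligned accesses:
 *
 *	void *args_base;
 *	void *args = xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
 *						   GFP_KERNEL, &args_base);
 *	if (args == NULL)
 *		return xpNoMemory;
 *	...
 *	kfree(args_base);
 *
 * xpc_setup_ch_structures() below follows this pattern with
 * part->remote_openclose_args and part->remote_openclose_args_base.
 */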
385 
386 /*
387  * Set up the channel structures necessary to support XPartition Communication
388  * between the specified remote partition and the local one.
389  */
390 static enum xp_retval
391 xpc_setup_ch_structures(struct xpc_partition *part)
392 {
393 	enum xp_retval ret;
394 	int ch_number;
395 	struct xpc_channel *ch;
396 	short partid = XPC_PARTID(part);
397 
398 	/*
399 	 * Allocate all of the channel structures as a contiguous chunk of
400 	 * memory.
401 	 */
402 	DBUG_ON(part->channels != NULL);
403 	part->channels = kcalloc(XPC_MAX_NCHANNELS,
404 				 sizeof(struct xpc_channel),
405 				 GFP_KERNEL);
406 	if (part->channels == NULL) {
407 		dev_err(xpc_chan, "can't get memory for channels\n");
408 		return xpNoMemory;
409 	}
410 
411 	/* allocate the remote open and close args */
412 
413 	part->remote_openclose_args =
414 	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
415 					  GFP_KERNEL, &part->
416 					  remote_openclose_args_base);
417 	if (part->remote_openclose_args == NULL) {
418 		dev_err(xpc_chan, "can't get memory for remote connect args\n");
419 		ret = xpNoMemory;
420 		goto out_1;
421 	}
422 
423 	part->chctl.all_flags = 0;
424 	spin_lock_init(&part->chctl_lock);
425 
426 	atomic_set(&part->channel_mgr_requests, 1);
427 	init_waitqueue_head(&part->channel_mgr_wq);
428 
429 	part->nchannels = XPC_MAX_NCHANNELS;
430 
431 	atomic_set(&part->nchannels_active, 0);
432 	atomic_set(&part->nchannels_engaged, 0);
433 
434 	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
435 		ch = &part->channels[ch_number];
436 
437 		ch->partid = partid;
438 		ch->number = ch_number;
439 		ch->flags = XPC_C_DISCONNECTED;
440 
441 		atomic_set(&ch->kthreads_assigned, 0);
442 		atomic_set(&ch->kthreads_idle, 0);
443 		atomic_set(&ch->kthreads_active, 0);
444 
445 		atomic_set(&ch->references, 0);
446 		atomic_set(&ch->n_to_notify, 0);
447 
448 		spin_lock_init(&ch->lock);
449 		init_completion(&ch->wdisconnect_wait);
450 
451 		atomic_set(&ch->n_on_msg_allocate_wq, 0);
452 		init_waitqueue_head(&ch->msg_allocate_wq);
453 		init_waitqueue_head(&ch->idle_wq);
454 	}
455 
456 	ret = xpc_arch_ops.setup_ch_structures(part);
457 	if (ret != xpSuccess)
458 		goto out_2;
459 
460 	/*
461 	 * With the setting of the partition setup_state to XPC_P_SS_SETUP,
462 	 * we're declaring that this partition is ready to go.
463 	 */
464 	part->setup_state = XPC_P_SS_SETUP;
465 
466 	return xpSuccess;
467 
468 	/* setup of ch structures failed */
469 out_2:
470 	kfree(part->remote_openclose_args_base);
471 	part->remote_openclose_args = NULL;
472 out_1:
473 	kfree(part->channels);
474 	part->channels = NULL;
475 	return ret;
476 }
477 
478 /*
479  * Tear down the channel structures necessary to support XPartition Communication
480  * between the specified remote partition and the local one.
481  */
482 static void
483 xpc_teardown_ch_structures(struct xpc_partition *part)
484 {
485 	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
486 	DBUG_ON(atomic_read(&part->nchannels_active) != 0);
487 
488 	/*
489 	 * Make this partition inaccessible to local processes by marking it
490 	 * as no longer set up. Then wait before proceeding with the teardown
491 	 * until all existing references cease.
492 	 */
493 	DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
494 	part->setup_state = XPC_P_SS_WTEARDOWN;
495 
496 	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
497 
498 	/* now we can begin tearing down the infrastructure */
499 
500 	xpc_arch_ops.teardown_ch_structures(part);
501 
502 	kfree(part->remote_openclose_args_base);
503 	part->remote_openclose_args = NULL;
504 	kfree(part->channels);
505 	part->channels = NULL;
506 
507 	part->setup_state = XPC_P_SS_TORNDOWN;
508 }
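
/*
 * For reference, part->setup_state moves through these values as set in this
 * file: XPC_P_SS_UNSET (xpc_setup_partitions) -> XPC_P_SS_SETUP
 * (xpc_setup_ch_structures) -> XPC_P_SS_WTEARDOWN -> XPC_P_SS_TORNDOWN
 * (xpc_teardown_ch_structures above).
 */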
509 
510 /*
511  * When XPC HB determines that a partition has come up, it will create a new
512  * kthread and that kthread will call this function to attempt to set up the
513  * basic infrastructure used for Cross Partition Communication with the newly
514  * upped partition.
515  *
516  * The kthread that was created by XPC HB and which set up the XPC
517  * infrastructure will remain assigned to the partition, becoming the channel
518  * manager for that partition until the partition is deactivating, at which
519  * time the kthread will teardown the XPC infrastructure and then exit.
520  */
521 static int
522 xpc_activating(void *__partid)
523 {
524 	short partid = (u64)__partid;
525 	struct xpc_partition *part = &xpc_partitions[partid];
526 	unsigned long irq_flags;
527 
528 	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
529 
530 	spin_lock_irqsave(&part->act_lock, irq_flags);
531 
532 	if (part->act_state == XPC_P_AS_DEACTIVATING) {
533 		part->act_state = XPC_P_AS_INACTIVE;
534 		spin_unlock_irqrestore(&part->act_lock, irq_flags);
535 		part->remote_rp_pa = 0;
536 		return 0;
537 	}
538 
539 	/* indicate the thread is activating */
540 	DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
541 	part->act_state = XPC_P_AS_ACTIVATING;
542 
543 	XPC_SET_REASON(part, 0, 0);
544 	spin_unlock_irqrestore(&part->act_lock, irq_flags);
545 
546 	dev_dbg(xpc_part, "activating partition %d\n", partid);
547 
548 	xpc_arch_ops.allow_hb(partid);
549 
550 	if (xpc_setup_ch_structures(part) == xpSuccess) {
551 		(void)xpc_part_ref(part);	/* this will always succeed */
552 
553 		if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
554 			xpc_mark_partition_active(part);
555 			xpc_channel_mgr(part);
556 			/* won't return until partition is deactivating */
557 		}
558 
559 		xpc_part_deref(part);
560 		xpc_teardown_ch_structures(part);
561 	}
562 
563 	xpc_arch_ops.disallow_hb(partid);
564 	xpc_mark_partition_inactive(part);
565 
566 	if (part->reason == xpReactivating) {
567 		/* interrupting ourselves results in activating partition */
568 		xpc_arch_ops.request_partition_reactivation(part);
569 	}
570 
571 	return 0;
572 }
573 
574 void
575 xpc_activate_partition(struct xpc_partition *part)
576 {
577 	short partid = XPC_PARTID(part);
578 	unsigned long irq_flags;
579 	struct task_struct *kthread;
580 
581 	spin_lock_irqsave(&part->act_lock, irq_flags);
582 
583 	DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);
584 
585 	part->act_state = XPC_P_AS_ACTIVATION_REQ;
586 	XPC_SET_REASON(part, xpCloneKThread, __LINE__);
587 
588 	spin_unlock_irqrestore(&part->act_lock, irq_flags);
589 
590 	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
591 			      partid);
592 	if (IS_ERR(kthread)) {
593 		spin_lock_irqsave(&part->act_lock, irq_flags);
594 		part->act_state = XPC_P_AS_INACTIVE;
595 		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
596 		spin_unlock_irqrestore(&part->act_lock, irq_flags);
597 	}
598 }
599 
600 void
601 xpc_activate_kthreads(struct xpc_channel *ch, int needed)
602 {
603 	int idle = atomic_read(&ch->kthreads_idle);
604 	int assigned = atomic_read(&ch->kthreads_assigned);
605 	int wakeup;
606 
607 	DBUG_ON(needed <= 0);
608 
609 	if (idle > 0) {
610 		wakeup = (needed > idle) ? idle : needed;
611 		needed -= wakeup;
612 
613 		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
614 			"channel=%d\n", wakeup, ch->partid, ch->number);
615 
616 		/* only wakeup the requested number of kthreads */
617 		wake_up_nr(&ch->idle_wq, wakeup);
618 	}
619 
620 	if (needed <= 0)
621 		return;
622 
623 	if (needed + assigned > ch->kthreads_assigned_limit) {
624 		needed = ch->kthreads_assigned_limit - assigned;
625 		if (needed <= 0)
626 			return;
627 	}
628 
629 	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
630 		needed, ch->partid, ch->number);
631 
632 	xpc_create_kthreads(ch, needed, 0);
633 }
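
/*
 * Worked example (illustrative numbers only): with needed = 5,
 * kthreads_idle = 2, kthreads_assigned = 4 and kthreads_assigned_limit = 6,
 * the code above wakes the 2 idle kthreads, leaving needed = 3, clamps that
 * to the limit (6 - 4 = 2) and asks xpc_create_kthreads() for 2 new kthreads.
 */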
634 
635 /*
636  * This function is where XPC's kthreads wait for messages to deliver.
637  */
638 static void
639 xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
640 {
641 	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
642 		xpc_arch_ops.n_of_deliverable_payloads;
643 
644 	do {
645 		/* deliver messages to their intended recipients */
646 
647 		while (n_of_deliverable_payloads(ch) > 0 &&
648 		       !(ch->flags & XPC_C_DISCONNECTING)) {
649 			xpc_deliver_payload(ch);
650 		}
651 
652 		if (atomic_inc_return(&ch->kthreads_idle) >
653 		    ch->kthreads_idle_limit) {
654 			/* too many idle kthreads on this channel */
655 			atomic_dec(&ch->kthreads_idle);
656 			break;
657 		}
658 
659 		dev_dbg(xpc_chan, "idle kthread calling "
660 			"wait_event_interruptible_exclusive()\n");
661 
662 		(void)wait_event_interruptible_exclusive(ch->idle_wq,
663 				(n_of_deliverable_payloads(ch) > 0 ||
664 				 (ch->flags & XPC_C_DISCONNECTING)));
665 
666 		atomic_dec(&ch->kthreads_idle);
667 
668 	} while (!(ch->flags & XPC_C_DISCONNECTING));
669 }
670 
671 static int
672 xpc_kthread_start(void *args)
673 {
674 	short partid = XPC_UNPACK_ARG1(args);
675 	u16 ch_number = XPC_UNPACK_ARG2(args);
676 	struct xpc_partition *part = &xpc_partitions[partid];
677 	struct xpc_channel *ch;
678 	int n_needed;
679 	unsigned long irq_flags;
680 	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
681 		xpc_arch_ops.n_of_deliverable_payloads;
682 
683 	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
684 		partid, ch_number);
685 
686 	ch = &part->channels[ch_number];
687 
688 	if (!(ch->flags & XPC_C_DISCONNECTING)) {
689 
690 		/* let registerer know that connection has been established */
691 
692 		spin_lock_irqsave(&ch->lock, irq_flags);
693 		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
694 			ch->flags |= XPC_C_CONNECTEDCALLOUT;
695 			spin_unlock_irqrestore(&ch->lock, irq_flags);
696 
697 			xpc_connected_callout(ch);
698 
699 			spin_lock_irqsave(&ch->lock, irq_flags);
700 			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
701 			spin_unlock_irqrestore(&ch->lock, irq_flags);
702 
703 			/*
704 			 * It is possible that while the callout was being
705 			 * made that the remote partition sent some messages.
706 			 * If that is the case, we may need to activate
707 			 * additional kthreads to help deliver them. We only
708 			 * need one less than the total #of messages to deliver,
			 * since this kthread will deliver one of them itself.
709 			 */
710 			n_needed = n_of_deliverable_payloads(ch) - 1;
711 			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
712 				xpc_activate_kthreads(ch, n_needed);
713 
714 		} else {
715 			spin_unlock_irqrestore(&ch->lock, irq_flags);
716 		}
717 
718 		xpc_kthread_waitmsgs(part, ch);
719 	}
720 
721 	/* let registerer know that connection is disconnecting */
722 
723 	spin_lock_irqsave(&ch->lock, irq_flags);
724 	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
725 	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
726 		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
727 		spin_unlock_irqrestore(&ch->lock, irq_flags);
728 
729 		xpc_disconnect_callout(ch, xpDisconnecting);
730 
731 		spin_lock_irqsave(&ch->lock, irq_flags);
732 		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
733 	}
734 	spin_unlock_irqrestore(&ch->lock, irq_flags);
735 
736 	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
737 	    atomic_dec_return(&part->nchannels_engaged) == 0) {
738 		xpc_arch_ops.indicate_partition_disengaged(part);
739 	}
740 
741 	xpc_msgqueue_deref(ch);
742 
743 	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
744 		partid, ch_number);
745 
746 	xpc_part_deref(part);
747 	return 0;
748 }
749 
750 /*
751  * For each partition that XPC has established communications with, there is
752  * a minimum of one kernel thread assigned to perform any operation that
753  * may potentially sleep or block (basically the callouts to the asynchronous
754  * functions registered via xpc_connect()).
755  *
756  * Additional kthreads are created and destroyed by XPC as the workload
757  * demands.
758  *
759  * A kthread is assigned to one of the active channels that exists for a given
760  * partition.
761  */
762 void
763 xpc_create_kthreads(struct xpc_channel *ch, int needed,
764 		    int ignore_disconnecting)
765 {
766 	unsigned long irq_flags;
767 	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
768 	struct xpc_partition *part = &xpc_partitions[ch->partid];
769 	struct task_struct *kthread;
770 	void (*indicate_partition_disengaged) (struct xpc_partition *) =
771 		xpc_arch_ops.indicate_partition_disengaged;
772 
773 	while (needed-- > 0) {
774 
775 		/*
776 		 * The following is done on behalf of the newly created
777 		 * kthread. That kthread is responsible for doing the
778 		 * counterpart to the following before it exits.
779 		 */
780 		if (ignore_disconnecting) {
781 			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
782 				/* kthreads assigned had gone to zero */
783 				BUG_ON(!(ch->flags &
784 					 XPC_C_DISCONNECTINGCALLOUT_MADE));
785 				break;
786 			}
787 
788 		} else if (ch->flags & XPC_C_DISCONNECTING) {
789 			break;
790 
791 		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
792 			   atomic_inc_return(&part->nchannels_engaged) == 1) {
793 			xpc_arch_ops.indicate_partition_engaged(part);
794 		}
795 		(void)xpc_part_ref(part);
796 		xpc_msgqueue_ref(ch);
797 
798 		kthread = kthread_run(xpc_kthread_start, (void *)args,
799 				      "xpc%02dc%d", ch->partid, ch->number);
800 		if (IS_ERR(kthread)) {
801 			/* the fork failed */
802 
803 			/*
804 			 * NOTE: if (ignore_disconnecting &&
805 			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
806 			 * then we'll deadlock if all other kthreads assigned
807 			 * to this channel are blocked in the channel's
808 			 * registerer, because the only thing that will unblock
809 			 * them is the xpDisconnecting callout that this
810 			 * failed kthread_run() would have made.
811 			 */
812 
813 			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
814 			    atomic_dec_return(&part->nchannels_engaged) == 0) {
815 				indicate_partition_disengaged(part);
816 			}
817 			xpc_msgqueue_deref(ch);
818 			xpc_part_deref(part);
819 
820 			if (atomic_read(&ch->kthreads_assigned) <
821 			    ch->kthreads_idle_limit) {
822 				/*
823 				 * Flag this as an error only if we have an
824 				 * insufficient #of kthreads for the channel
825 				 * to function.
826 				 */
827 				spin_lock_irqsave(&ch->lock, irq_flags);
828 				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
829 						       &irq_flags);
830 				spin_unlock_irqrestore(&ch->lock, irq_flags);
831 			}
832 			break;
833 		}
834 	}
835 }
836 
837 void
838 xpc_disconnect_wait(int ch_number)
839 {
840 	unsigned long irq_flags;
841 	short partid;
842 	struct xpc_partition *part;
843 	struct xpc_channel *ch;
844 	int wakeup_channel_mgr;
845 
846 	/* now wait for all callouts to the caller's function to cease */
847 	for (partid = 0; partid < xp_max_npartitions; partid++) {
848 		part = &xpc_partitions[partid];
849 
850 		if (!xpc_part_ref(part))
851 			continue;
852 
853 		ch = &part->channels[ch_number];
854 
855 		if (!(ch->flags & XPC_C_WDISCONNECT)) {
856 			xpc_part_deref(part);
857 			continue;
858 		}
859 
860 		wait_for_completion(&ch->wdisconnect_wait);
861 
862 		spin_lock_irqsave(&ch->lock, irq_flags);
863 		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
864 		wakeup_channel_mgr = 0;
865 
866 		if (ch->delayed_chctl_flags) {
867 			if (part->act_state != XPC_P_AS_DEACTIVATING) {
868 				spin_lock(&part->chctl_lock);
869 				part->chctl.flags[ch->number] |=
870 				    ch->delayed_chctl_flags;
871 				spin_unlock(&part->chctl_lock);
872 				wakeup_channel_mgr = 1;
873 			}
874 			ch->delayed_chctl_flags = 0;
875 		}
876 
877 		ch->flags &= ~XPC_C_WDISCONNECT;
878 		spin_unlock_irqrestore(&ch->lock, irq_flags);
879 
880 		if (wakeup_channel_mgr)
881 			xpc_wakeup_channel_mgr(part);
882 
883 		xpc_part_deref(part);
884 	}
885 }
886 
887 static int
888 xpc_setup_partitions(void)
889 {
890 	short partid;
891 	struct xpc_partition *part;
892 
893 	xpc_partitions = kcalloc(xp_max_npartitions,
894 				 sizeof(struct xpc_partition),
895 				 GFP_KERNEL);
896 	if (xpc_partitions == NULL) {
897 		dev_err(xpc_part, "can't get memory for partition structure\n");
898 		return -ENOMEM;
899 	}
900 
901 	/*
902 	 * The first few fields of each entry of xpc_partitions[] need to
903 	 * be initialized now so that calls to xpc_connect() and
904 	 * xpc_disconnect() can be made prior to the activation of any remote
905 	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
906 	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
907 	 * PARTITION HAS BEEN ACTIVATED.
908 	 */
909 	for (partid = 0; partid < xp_max_npartitions; partid++) {
910 		part = &xpc_partitions[partid];
911 
912 		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
913 
914 		part->activate_IRQ_rcvd = 0;
915 		spin_lock_init(&part->act_lock);
916 		part->act_state = XPC_P_AS_INACTIVE;
917 		XPC_SET_REASON(part, 0, 0);
918 
919 		timer_setup(&part->disengage_timer,
920 			    xpc_timeout_partition_disengage, 0);
921 
922 		part->setup_state = XPC_P_SS_UNSET;
923 		init_waitqueue_head(&part->teardown_wq);
924 		atomic_set(&part->references, 0);
925 	}
926 
927 	return xpc_arch_ops.setup_partitions();
928 }
929 
930 static void
931 xpc_teardown_partitions(void)
932 {
933 	xpc_arch_ops.teardown_partitions();
934 	kfree(xpc_partitions);
935 }
936 
937 static void
938 xpc_do_exit(enum xp_retval reason)
939 {
940 	short partid;
941 	int active_part_count, printed_waiting_msg = 0;
942 	struct xpc_partition *part;
943 	unsigned long printmsg_time, disengage_timeout = 0;
944 
945 	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
946 	DBUG_ON(xpc_exiting == 1);
947 
948 	/*
949 	 * Let the heartbeat checker thread and the discovery thread
950 	 * (if one is running) know that they should exit. Also wake up
951 	 * the heartbeat checker thread in case it's sleeping.
952 	 */
953 	xpc_exiting = 1;
954 	wake_up_interruptible(&xpc_activate_IRQ_wq);
955 
956 	/* wait for the discovery thread to exit */
957 	wait_for_completion(&xpc_discovery_exited);
958 
959 	/* wait for the heartbeat checker thread to exit */
960 	wait_for_completion(&xpc_hb_checker_exited);
961 
962 	/* sleep for about a third of a second */
963 	(void)msleep_interruptible(300);
964 
965 	/* wait for all partitions to become inactive */
966 
967 	printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
968 	xpc_disengage_timedout = 0;
969 
970 	do {
971 		active_part_count = 0;
972 
973 		for (partid = 0; partid < xp_max_npartitions; partid++) {
974 			part = &xpc_partitions[partid];
975 
976 			if (xpc_partition_disengaged(part) &&
977 			    part->act_state == XPC_P_AS_INACTIVE) {
978 				continue;
979 			}
980 
981 			active_part_count++;
982 
983 			XPC_DEACTIVATE_PARTITION(part, reason);
984 
985 			if (part->disengage_timeout > disengage_timeout)
986 				disengage_timeout = part->disengage_timeout;
987 		}
988 
989 		if (xpc_arch_ops.any_partition_engaged()) {
990 			if (time_is_before_jiffies(printmsg_time)) {
991 				dev_info(xpc_part, "waiting for remote "
992 					 "partitions to deactivate, timeout in "
993 					 "%ld seconds\n", (disengage_timeout -
994 					 jiffies) / HZ);
995 				printmsg_time = jiffies +
996 				    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
997 				printed_waiting_msg = 1;
998 			}
999 
1000 		} else if (active_part_count > 0) {
1001 			if (printed_waiting_msg) {
1002 				dev_info(xpc_part, "waiting for local partition"
1003 					 " to deactivate\n");
1004 				printed_waiting_msg = 0;
1005 			}
1006 
1007 		} else {
1008 			if (!xpc_disengage_timedout) {
1009 				dev_info(xpc_part, "all partitions have "
1010 					 "deactivated\n");
1011 			}
1012 			break;
1013 		}
1014 
1015 		/* sleep for about a third of a second */
1016 		(void)msleep_interruptible(300);
1017 
1018 	} while (1);
1019 
1020 	DBUG_ON(xpc_arch_ops.any_partition_engaged());
1021 
1022 	xpc_teardown_rsvd_page();
1023 
1024 	if (reason == xpUnloading) {
1025 		(void)unregister_die_notifier(&xpc_die_notifier);
1026 		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
1027 	}
1028 
1029 	/* clear the interface to XPC's functions */
1030 	xpc_clear_interface();
1031 
1032 	if (xpc_sysctl)
1033 		unregister_sysctl_table(xpc_sysctl);
1034 	if (xpc_sysctl_hb)
1035 		unregister_sysctl_table(xpc_sysctl_hb);
1036 
1037 	xpc_teardown_partitions();
1038 
1039 	if (is_uv_system())
1040 		xpc_exit_uv();
1041 }
1042 
1043 /*
1044  * This function is called when the system is being rebooted.
1045  */
1046 static int
1047 xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
1048 {
1049 	enum xp_retval reason;
1050 
1051 	switch (event) {
1052 	case SYS_RESTART:
1053 		reason = xpSystemReboot;
1054 		break;
1055 	case SYS_HALT:
1056 		reason = xpSystemHalt;
1057 		break;
1058 	case SYS_POWER_OFF:
1059 		reason = xpSystemPoweroff;
1060 		break;
1061 	default:
1062 		reason = xpSystemGoingDown;
1063 	}
1064 
1065 	xpc_do_exit(reason);
1066 	return NOTIFY_DONE;
1067 }
1068 
1069 /* Used to only allow one cpu to complete disconnect */
1070 static unsigned int xpc_die_disconnecting;
1071 
1072 /*
1073  * Notify other partitions to deactivate from us by first disengaging from all
1074  * references to our memory.
1075  */
1076 static void
1077 xpc_die_deactivate(void)
1078 {
1079 	struct xpc_partition *part;
1080 	short partid;
1081 	int any_engaged;
1082 	long keep_waiting;
1083 	long wait_to_print;
1084 
1085 	if (cmpxchg(&xpc_die_disconnecting, 0, 1))
1086 		return;
1087 
1088 	/* keep xpc_hb_checker thread from doing anything (just in case) */
1089 	xpc_exiting = 1;
1090 
1091 	xpc_arch_ops.disallow_all_hbs();   /* indicate we're deactivated */
1092 
1093 	for (partid = 0; partid < xp_max_npartitions; partid++) {
1094 		part = &xpc_partitions[partid];
1095 
1096 		if (xpc_arch_ops.partition_engaged(partid) ||
1097 		    part->act_state != XPC_P_AS_INACTIVE) {
1098 			xpc_arch_ops.request_partition_deactivation(part);
1099 			xpc_arch_ops.indicate_partition_disengaged(part);
1100 		}
1101 	}
1102 
1103 	/*
1104 	 * Though we requested that all other partitions deactivate from us,
1105 	 * we only wait until they've all disengaged or we've reached the
1106 	 * defined timelimit.
1107 	 *
1108 	 * Given that one iteration through the following while-loop takes
1109 	 * approximately 200 microseconds, calculate the #of loops to take
1110 	 * before bailing and the #of loops before printing a waiting message.
1111 	 */
1112 	keep_waiting = xpc_disengage_timelimit * 1000 * 5;
1113 	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;
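
	/*
	 * Illustrative arithmetic: with a timelimit of, say, 60 seconds,
	 * keep_waiting = 60 * 1000 * 5 = 300000 iterations, i.e. roughly
	 * 60 seconds worth of 200 microsecond delays; wait_to_print is sized
	 * the same way from XPC_DEACTIVATE_PRINTMSG_INTERVAL.
	 */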
1114 
1115 	while (1) {
1116 		any_engaged = xpc_arch_ops.any_partition_engaged();
1117 		if (!any_engaged) {
1118 			dev_info(xpc_part, "all partitions have deactivated\n");
1119 			break;
1120 		}
1121 
1122 		if (!keep_waiting--) {
1123 			for (partid = 0; partid < xp_max_npartitions;
1124 			     partid++) {
1125 				if (xpc_arch_ops.partition_engaged(partid)) {
1126 					dev_info(xpc_part, "deactivate from "
1127 						 "remote partition %d timed "
1128 						 "out\n", partid);
1129 				}
1130 			}
1131 			break;
1132 		}
1133 
1134 		if (!wait_to_print--) {
1135 			dev_info(xpc_part, "waiting for remote partitions to "
1136 				 "deactivate, timeout in %ld seconds\n",
1137 				 keep_waiting / (1000 * 5));
1138 			wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
1139 			    1000 * 5;
1140 		}
1141 
1142 		udelay(200);
1143 	}
1144 }
1145 
1146 /*
1147  * This function is called when the system is being restarted or halted due
1148  * to some sort of system failure. If this is the case we need to notify the
1149  * other partitions to disengage from all references to our memory.
1150  * This function can also be called when our heartbeat may be offline
1151  * for a time. In this case we need to notify other partitions not to worry
1152  * about the lack of a heartbeat.
1153  */
1154 static int
1155 xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
1156 {
1157 	struct die_args *die_args = _die_args;
1158 
1159 	switch (event) {
1160 	case DIE_TRAP:
1161 		if (die_args->trapnr == X86_TRAP_DF)
1162 			xpc_die_deactivate();
1163 
1164 		if (((die_args->trapnr == X86_TRAP_MF) ||
1165 		     (die_args->trapnr == X86_TRAP_XF)) &&
1166 		    !user_mode(die_args->regs))
1167 			xpc_die_deactivate();
1168 
1169 		break;
1170 	case DIE_INT3:
1171 	case DIE_DEBUG:
1172 		break;
1173 	case DIE_OOPS:
1174 	case DIE_GPF:
1175 	default:
1176 		xpc_die_deactivate();
1177 	}
1178 
1179 	return NOTIFY_DONE;
1180 }
1181 
1182 static int __init
1183 xpc_init(void)
1184 {
1185 	int ret;
1186 	struct task_struct *kthread;
1187 
1188 	dev_set_name(xpc_part, "part");
1189 	dev_set_name(xpc_chan, "chan");
1190 
1191 	if (is_uv_system()) {
1192 		ret = xpc_init_uv();
1193 
1194 	} else {
1195 		ret = -ENODEV;
1196 	}
1197 
1198 	if (ret != 0)
1199 		return ret;
1200 
1201 	ret = xpc_setup_partitions();
1202 	if (ret != 0) {
1203 		dev_err(xpc_part, "can't get memory for partition structure\n");
1204 		goto out_1;
1205 	}
1206 
1207 	xpc_sysctl = register_sysctl("xpc", xpc_sys_xpc);
1208 	xpc_sysctl_hb = register_sysctl("xpc/hb", xpc_sys_xpc_hb);
1209 
1210 	/*
1211 	 * Fill the partition reserved page with the information needed by
1212 	 * other partitions to discover we are alive and establish initial
1213 	 * communications.
1214 	 */
1215 	ret = xpc_setup_rsvd_page();
1216 	if (ret != 0) {
1217 		dev_err(xpc_part, "can't setup our reserved page\n");
1218 		goto out_2;
1219 	}
1220 
1221 	/* add ourselves to the reboot_notifier_list */
1222 	ret = register_reboot_notifier(&xpc_reboot_notifier);
1223 	if (ret != 0)
1224 		dev_warn(xpc_part, "can't register reboot notifier\n");
1225 
1226 	/* add ourselves to the die_notifier list */
1227 	ret = register_die_notifier(&xpc_die_notifier);
1228 	if (ret != 0)
1229 		dev_warn(xpc_part, "can't register die notifier\n");
1230 
1231 	/*
1232 	 * The real work-horse behind xpc.  This processes incoming
1233 	 * interrupts and monitors remote heartbeats.
1234 	 */
1235 	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
1236 	if (IS_ERR(kthread)) {
1237 		dev_err(xpc_part, "failed while forking hb check thread\n");
1238 		ret = -EBUSY;
1239 		goto out_3;
1240 	}
1241 
1242 	/*
1243 	 * Startup a thread that will attempt to discover other partitions to
1244 	 * activate based on info provided by SAL. This new thread is short
1245 	 * lived and will exit once discovery is complete.
1246 	 */
1247 	kthread = kthread_run(xpc_initiate_discovery, NULL,
1248 			      XPC_DISCOVERY_THREAD_NAME);
1249 	if (IS_ERR(kthread)) {
1250 		dev_err(xpc_part, "failed while forking discovery thread\n");
1251 
1252 		/* mark this new thread as a non-starter */
1253 		complete(&xpc_discovery_exited);
1254 
1255 		xpc_do_exit(xpUnloading);
1256 		return -EBUSY;
1257 	}
1258 
1259 	/* set the interface to point at XPC's functions */
1260 	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
1261 			  xpc_initiate_send, xpc_initiate_send_notify,
1262 			  xpc_initiate_received, xpc_initiate_partid_to_nasids);
1263 
1264 	return 0;
1265 
1266 	/* initialization was not successful */
1267 out_3:
1268 	xpc_teardown_rsvd_page();
1269 
1270 	(void)unregister_die_notifier(&xpc_die_notifier);
1271 	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
1272 out_2:
1273 	if (xpc_sysctl_hb)
1274 		unregister_sysctl_table(xpc_sysctl_hb);
1275 	if (xpc_sysctl)
1276 		unregister_sysctl_table(xpc_sysctl);
1277 
1278 	xpc_teardown_partitions();
1279 out_1:
1280 	if (is_uv_system())
1281 		xpc_exit_uv();
1282 	return ret;
1283 }
1284 
1285 module_init(xpc_init);
1286 
1287 static void __exit
1288 xpc_exit(void)
1289 {
1290 	xpc_do_exit(xpUnloading);
1291 }
1292 
1293 module_exit(xpc_exit);
1294 
1295 MODULE_AUTHOR("Silicon Graphics, Inc.");
1296 MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
1297 MODULE_LICENSE("GPL");
1298 
1299 module_param(xpc_hb_interval, int, 0);
1300 MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
1301 		 "heartbeat increments.");
1302 
1303 module_param(xpc_hb_check_interval, int, 0);
1304 MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
1305 		 "heartbeat checks.");
1306 
1307 module_param(xpc_disengage_timelimit, int, 0);
1308 MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
1309 		 "for disengage to complete.");
1310 
1311 module_param(xpc_kdebug_ignore, int, 0);
1312 MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
1313 		 "other partitions when dropping into kdebug.");
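
/*
 * Load-time usage sketch (parameter names as declared above; a perm argument
 * of 0 means they are settable only at load time and are not exposed under
 * /sys/module/xpc/parameters):
 *
 *	modprobe xpc xpc_hb_interval=5 xpc_hb_check_interval=20
 */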
1314