/*
 *  drivers/s390/char/sclp.c
 *     core function to access sclp interface
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <asm/types.h>
#include <asm/s390_ext.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;
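
/*
 * Summary of the request life cycle implemented below (comment added for
 * clarity): a request is queued as SCLP_REQ_QUEUED, started by the service
 * call (SCLP_REQ_RUNNING / sclp_running_state_running), and completed either
 * by the service-signal external interrupt (SCLP_REQ_DONE) or by the retry
 * timer. sclp_running_state_reset_pending is only held while the interrupt
 * handler runs request callbacks.
 */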

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	asm volatile(
		"	.insn	rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}
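
/*
 * Example (illustrative sketch, not part of the driver): retrying a service
 * call that reports a busy interface. Condition code 2 (-EBUSY) means the
 * SCLP is temporarily busy, condition code 3 (-EIO) means the command is not
 * operational. The helper name is made up; real callers should queue
 * requests via sclp_add_request() below instead of looping.
 */
#if 0
static int example_service_call_retry(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	do {
		rc = sclp_service_call(cmd, sccb);
	} while (rc == -EBUSY);	/* crude retry; the driver uses a timer */
	return rc;
}
#endif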

static inline void __sclp_make_read_req(void);

static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if ((sclp_init_state != sclp_init_state_initialized ||
	     sclp_activation_state != sclp_activation_state_active) &&
	    req != &sclp_init_req) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);
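
/*
 * Example (illustrative sketch, not part of the driver): how a caller builds
 * and queues a request. The command word, SCCB and callback are assumptions
 * made up for the example; the SCCB must be addressable by the service
 * element (the driver itself uses static page-aligned buffers).
 */
#if 0
static void example_callback(struct sclp_req *req, void *data)
{
	/* Invoked without sclp_lock held; check req->status here. */
}

static int example_submit(sclp_cmdw_t cmd, void *sccb)
{
	static struct sclp_req req;

	memset(&req, 0, sizeof(req));
	req.command = cmd;
	req.sccb = sccb;
	req.status = SCLP_REQ_FILLED;
	req.callback = example_callback;
	req.callback_data = NULL;
	return sclp_add_request(&req);	/* -EIO if driver not active */
}
#endif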

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		/* Search for event handler */
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -ENOSYS;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
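
/*
 * Note on the mask test above: event type N selects the Nth bit from the
 * left of the 32-bit event mask, i.e. 1 << (32 - N). For example, type 8
 * yields 1 << 24 = 0x01000000, matching the EvTyp_StateChange_Mask value
 * used below.
 */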

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static inline void
__sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void
sclp_interrupt_handler(__u16 code)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	spin_lock(&sclp_lock);
	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	evbuf_pending = S390_lowcore.ext_params & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending && sclp_receive_mask != 0 &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}
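
/*
 * Note on the conversion above: TOD-clock bit 51 is incremented every
 * microsecond, so one second is 10^6 << 12 = 4,096,000,000 TOD ticks.
 * Shifting whole seconds left by 32 yields 4,294,967,296 ticks (about
 * 1.05 s), a slight overestimate that is harmless for a timeout bound.
 */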

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync |= 0x00000200;
	cr0_sync &= 0xFFFFF3AC;
	__ctl_load(cr0_sync, 0, 0);
	__raw_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_irq_restore(flags);
}

EXPORT_SYMBOL(sclp_sync_wait);
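
/*
 * Example (illustrative sketch, not part of the driver): polling a request
 * to completion with sclp_sync_wait(), the same pattern sclp_init_mask()
 * uses below. The request pointer is an assumption made for the example.
 */
#if 0
static void example_wait_for(struct sclp_req *req)
{
	while (req->status != SCLP_REQ_DONE &&
	       req->status != SCLP_REQ_FAILED)
		sclp_sync_wait();	/* serves no timers on this CPU */
}
#endif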

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->receive_mask & sclp_receive_mask;
			send_mask = reg->send_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EvTyp_StateChange_Mask,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);
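
/*
 * Example (illustrative sketch, not part of the driver): registering an
 * event listener. The event-type mask and the callbacks are hypothetical;
 * a mask that collides with an already registered listener makes
 * sclp_register() fail with -EBUSY.
 */
#if 0
static void example_receiver(struct evbuf_header *evbuf)
{
	/* Called without sclp_lock held for each matching event buffer. */
}

static void example_state_change(struct sclp_register *reg)
{
	/* reg->sclp_receive_mask / reg->sclp_send_mask are the masks
	 * actually granted by the service element. */
}

static struct sclp_register example_listener = {
	.receive_mask = 0x00000001,	/* hypothetical event-type mask */
	.send_mask = 0,
	.receiver_fn = example_receiver,
	.state_change_fn = example_state_change,
};

/* sclp_register(&example_listener) triggers an initial mask update. */
#endif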

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);
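
/*
 * Example (illustrative sketch, not part of the driver): a receiver marks
 * an event buffer processed by setting flag bit 0x80, then lets
 * sclp_remove_processed() compact the SCCB. The helper name is made up.
 */
#if 0
static void example_consume(struct sccb_header *sccb,
			    struct evbuf_header *evbuf)
{
	evbuf->flags |= 0x80;		/* mark this buffer processed */
	if (sclp_remove_processed(sccb) == 0) {
		/* all event buffers in the SCCB have been handled */
	}
}
#endif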

struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_send_mask;
	sccb_mask_t sclp_receive_mask;
} __attribute__((packed));

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);
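
/*
 * Example (illustrative sketch, not part of the driver): quiescing the
 * interface around work that must not see SCLP traffic. The reboot
 * notifier below performs the deactivate half of this pattern.
 */
#if 0
static int example_quiesce(void)
{
	int rc;

	rc = sclp_deactivate();		/* reject requests, zero masks */
	if (rc)
		return rc;
	/* ... work that requires a quiet interface ... */
	return sclp_reactivate();	/* restore listener masks */
}
#endif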

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void
sclp_check_handler(__u16 code)
{
	u32 finished_sccb;

	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
		printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
		       "for buffer at 0x%x\n", finished_sccb);
		return;
	}
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_early_external_interrupt(0x2401, sclp_check_handler,
					       &ext_int_info_hwc);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_set_bit(0, 9);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_clear_bit(0, 9);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_early_external_interrupt(0x2401, sclp_check_handler,
					    &ext_int_info_hwc);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc;

	if (!MACHINE_HAS_SCLP)
		return -ENODEV;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return 0;
	}
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register interrupt handler */
	rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
					       &ext_int_info_hwc);
	if (rc) {
		unregister_reboot_notifier(&sclp_reboot_notifier);
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	ctl_set_bit(0, 9);
	sclp_init_mask(1);
	return 0;
}
933