xref: /linux/drivers/s390/char/sclp.c (revision 3e93d5bbcbfc3808f83712c0701f9d4c148cc8ed)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * core function to access sclp interface
4  *
5  * Copyright IBM Corp. 1999, 2009
6  *
7  * Author(s): Martin Peschke <mpeschke@de.ibm.com>
8  *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
9  */
10 
11 #include <linux/kernel_stat.h>
12 #include <linux/export.h>
13 #include <linux/module.h>
14 #include <linux/err.h>
15 #include <linux/panic_notifier.h>
16 #include <linux/spinlock.h>
17 #include <linux/interrupt.h>
18 #include <linux/timer.h>
19 #include <linux/reboot.h>
20 #include <linux/jiffies.h>
21 #include <linux/init.h>
22 #include <linux/platform_device.h>
23 #include <asm/types.h>
24 #include <asm/irq.h>
25 #include <asm/debug.h>
26 
27 #include "sclp.h"
28 
29 #define SCLP_HEADER		"sclp: "
30 
31 struct sclp_trace_entry {
32 	char id[4] __nonstring;
33 	u32 a;
34 	u64 b;
35 };
36 
37 #define SCLP_TRACE_ENTRY_SIZE		sizeof(struct sclp_trace_entry)
38 #define SCLP_TRACE_MAX_SIZE		128
39 #define SCLP_TRACE_EVENT_MAX_SIZE	64
40 
41 /* Debug trace area intended for all entries in abbreviated form. */
42 DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
43 			 &debug_hex_ascii_view);
44 
45 /* Error trace area intended for full entries relating to failed requests. */
46 DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
47 			 SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);
48 
49 /* Lock to protect internal data consistency. */
50 static DEFINE_SPINLOCK(sclp_lock);
51 
52 /* Mask of events that we can send to the sclp interface. */
53 static sccb_mask_t sclp_receive_mask;
54 
55 /* Mask of events that we can receive from the sclp interface. */
56 static sccb_mask_t sclp_send_mask;
57 
58 /* List of registered event listeners and senders. */
59 static LIST_HEAD(sclp_reg_list);
60 
61 /* List of queued requests. */
62 static LIST_HEAD(sclp_req_queue);
63 
64 /* Data for read and init requests. */
65 static struct sclp_req sclp_read_req;
66 static struct sclp_req sclp_init_req;
67 static void *sclp_read_sccb;
68 static struct init_sccb *sclp_init_sccb;
69 
70 /* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
71 int sclp_console_pages = SCLP_CONSOLE_PAGES;
72 /* Flag to indicate if buffer pages are dropped on buffer full condition */
73 bool sclp_console_drop = true;
74 /* Number of times the console dropped buffer pages */
75 unsigned long sclp_console_full;
76 
77 /* The currently active SCLP command word. */
78 static sclp_cmdw_t active_cmd;
79 
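/* Convert an SCCB address received as a 32-bit interrupt parameter to a
 * virtual address, or return NULL if no SCCB is indicated. */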
80 static inline struct sccb_header *sclpint_to_sccb(u32 sccb_int)
81 {
82 	if (sccb_int)
83 		return __va(sccb_int);
84 	return NULL;
85 }
86 
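/* Add an abbreviated entry (id plus two parameters) to the debug trace
 * area and, if err is set, also to the error trace area. */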
87 static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
88 {
89 	struct sclp_trace_entry e;
90 
91 	memset(&e, 0, sizeof(e));
92 	strtomem(e.id, id);
93 	e.a = a;
94 	e.b = b;
95 	debug_event(&sclp_debug, prio, &e, sizeof(e));
96 	if (err)
97 		debug_event(&sclp_debug_err, 0, &e, sizeof(e));
98 }
99 
100 static inline int no_zeroes_len(void *data, int len)
101 {
102 	char *d = data;
103 
104 	/* Minimize trace area usage by not tracing trailing zeroes. */
105 	while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0)
106 		len--;
107 
108 	return len;
109 }
110 
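/* Trace binary data with trailing zeroes stripped. If errlen is non-zero,
 * also add up to errlen bytes to the error trace area. */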
111 static inline void sclp_trace_bin(int prio, void *d, int len, int errlen)
112 {
113 	debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len));
114 	if (errlen)
115 		debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen));
116 }
117 
118 static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb)
119 {
120 	struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1);
121 	int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE;
122 
123 	/* Full SCCB tracing if debug level is set to max. */
124 	if (sclp_debug.level == DEBUG_MAX_LEVEL)
125 		return len;
126 
127 	/* Minimal tracing for console writes. */
128 	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA &&
129 	    (evbuf->type == EVTYP_MSG  || evbuf->type == EVTYP_VT220MSG))
130 		limit = SCLP_TRACE_ENTRY_SIZE;
131 
132 	return min(len, limit);
133 }
134 
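/* Trace an abbreviated entry followed by a hexdump of the associated SCCB,
 * shortened according to abbrev_len(). */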
135 static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b,
136 				   sclp_cmdw_t cmd, struct sccb_header *sccb,
137 				   bool err)
138 {
139 	sclp_trace(prio, id, a, b, err);
140 	if (sccb) {
141 		sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb),
142 			       err ? sccb->length : 0);
143 	}
144 }
145 
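/* Trace an abbreviated entry followed by a size-limited dump of the
 * event buffer. */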
146 static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b,
147 				    struct evbuf_header *evbuf, bool err)
148 {
149 	sclp_trace(prio, id, a, b, err);
150 	sclp_trace_bin(prio + 1, evbuf,
151 		       min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE),
152 		       err ? evbuf->length : 0);
153 }
154 
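/* Trace the state of a request: status, response code, queue timeout and
 * start count are packed into a single 64-bit summary value. */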
155 static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
156 				  bool err)
157 {
158 	struct sccb_header *sccb = req->sccb;
159 	union {
160 		struct {
161 			u16 status;
162 			u16 response;
163 			u16 timeout;
164 			u16 start_count;
165 		};
166 		u64 b;
167 	} summary;
168 
169 	summary.status = req->status;
170 	summary.response = sccb ? sccb->response_code : 0;
171 	summary.timeout = (u16)req->queue_timeout;
172 	summary.start_count = (u16)req->start_count;
173 
174 	sclp_trace(prio, id, __pa(sccb), summary.b, err);
175 }
176 
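/* Trace registration or deregistration of an event listener together with
 * its receive and send masks. */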
177 static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
178 				       struct sclp_register *reg)
179 {
180 	struct {
181 		u64 receive;
182 		u64 send;
183 	} d;
184 
185 	d.receive = reg->receive_mask;
186 	d.send = reg->send_mask;
187 
188 	sclp_trace(prio, id, a, b, false);
189 	sclp_trace_bin(prio, &d, sizeof(d), 0);
190 }
191 
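/* Handler for the sclp_con_pages= kernel parameter: override the number of
 * console pages. Values below the built-in default are ignored. */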
192 static int __init sclp_setup_console_pages(char *str)
193 {
194 	int pages, rc;
195 
196 	rc = kstrtoint(str, 0, &pages);
197 	if (!rc && pages >= SCLP_CONSOLE_PAGES)
198 		sclp_console_pages = pages;
199 	return 1;
200 }
201 
202 __setup("sclp_con_pages=", sclp_setup_console_pages);
203 
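/* Handler for the sclp_con_drop= kernel parameter. */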
204 static int __init sclp_setup_console_drop(char *str)
205 {
206 	return kstrtobool(str, &sclp_console_drop) == 0;
207 }
208 
209 __setup("sclp_con_drop=", sclp_setup_console_drop);
210 
211 /* Timer for request retries. */
212 static struct timer_list sclp_request_timer;
213 
214 /* Timer for queued requests. */
215 static struct timer_list sclp_queue_timer;
216 
217 /* Internal state: is a request active at the sclp? */
218 static volatile enum sclp_running_state_t {
219 	sclp_running_state_idle,
220 	sclp_running_state_running,
221 	sclp_running_state_reset_pending
222 } sclp_running_state = sclp_running_state_idle;
223 
224 /* Internal state: is a read request pending? */
225 static volatile enum sclp_reading_state_t {
226 	sclp_reading_state_idle,
227 	sclp_reading_state_reading
228 } sclp_reading_state = sclp_reading_state_idle;
229 
230 /* Internal state: is the driver currently serving requests? */
231 static volatile enum sclp_activation_state_t {
232 	sclp_activation_state_active,
233 	sclp_activation_state_deactivating,
234 	sclp_activation_state_inactive,
235 	sclp_activation_state_activating
236 } sclp_activation_state = sclp_activation_state_active;
237 
238 /* Internal state: is an init mask request pending? */
239 static volatile enum sclp_mask_state_t {
240 	sclp_mask_state_idle,
241 	sclp_mask_state_initializing
242 } sclp_mask_state = sclp_mask_state_idle;
243 
244 /* Maximum retry counts */
245 #define SCLP_INIT_RETRY		3
246 #define SCLP_MASK_RETRY		3
247 
248 /* Timeout intervals in seconds. */
249 #define SCLP_BUSY_INTERVAL	10
250 #define SCLP_RETRY_INTERVAL	30
251 
252 static void sclp_request_timeout(bool force_restart);
253 static void sclp_process_queue(void);
254 static void __sclp_make_read_req(void);
255 static int sclp_init_mask(int calculate);
256 
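/* Queue a read event data request at the head of the request queue unless a
 * read is already pending. Called while sclp_lock is locked. */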
257 static void
258 __sclp_queue_read_req(void)
259 {
260 	if (sclp_reading_state == sclp_reading_state_idle) {
261 		sclp_reading_state = sclp_reading_state_reading;
262 		__sclp_make_read_req();
263 		/* Add request to head of queue */
264 		list_add(&sclp_read_req.list, &sclp_req_queue);
265 	}
266 }
267 
268 /* Set up request retry timer. Called while sclp_lock is locked. */
269 static inline void
270 __sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
271 {
272 	timer_delete(&sclp_request_timer);
273 	sclp_request_timer.function = cb;
274 	sclp_request_timer.expires = jiffies + time;
275 	add_timer(&sclp_request_timer);
276 }
277 
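/* Timer callbacks invoking sclp_request_timeout() with and without a forced
 * restart of the running request. */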
278 static void sclp_request_timeout_restart(struct timer_list *unused)
279 {
280 	sclp_request_timeout(true);
281 }
282 
283 static void sclp_request_timeout_normal(struct timer_list *unused)
284 {
285 	sclp_request_timeout(false);
286 }
287 
288 /* Request timeout handler. Restart the request queue. If force_restart,
289  * force restart of running request. */
290 static void sclp_request_timeout(bool force_restart)
291 {
292 	unsigned long flags;
293 
294 	/* TMO: A timeout occurred (a=force_restart) */
295 	sclp_trace(2, "TMO", force_restart, 0, true);
296 
297 	spin_lock_irqsave(&sclp_lock, flags);
298 	if (force_restart) {
299 		if (sclp_running_state == sclp_running_state_running) {
300 			/* Break running state and queue NOP read event request
301 			 * to get a defined interface state. */
302 			__sclp_queue_read_req();
303 			sclp_running_state = sclp_running_state_idle;
304 		}
305 	} else {
306 		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
307 					 sclp_request_timeout_normal);
308 	}
309 	spin_unlock_irqrestore(&sclp_lock, flags);
310 	sclp_process_queue();
311 }
312 
313 /*
314  * Returns the expire value in jiffies of the next pending request timeout,
315  * if any. Needs to be called with sclp_lock.
316  */
317 static unsigned long __sclp_req_queue_find_next_timeout(void)
318 {
319 	unsigned long expires_next = 0;
320 	struct sclp_req *req;
321 
322 	list_for_each_entry(req, &sclp_req_queue, list) {
323 		if (!req->queue_expires)
324 			continue;
325 		if (!expires_next ||
326 		   (time_before(req->queue_expires, expires_next)))
327 				expires_next = req->queue_expires;
328 	}
329 	return expires_next;
330 }
331 
332 /*
333  * Returns expired request, if any, and removes it from the list.
334  */
335 static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
336 {
337 	unsigned long flags, now;
338 	struct sclp_req *req;
339 
340 	spin_lock_irqsave(&sclp_lock, flags);
341 	now = jiffies;
342 	/* Don't need list_for_each_safe because we break out after list_del */
343 	list_for_each_entry(req, &sclp_req_queue, list) {
344 		if (!req->queue_expires)
345 			continue;
346 		if (time_before_eq(req->queue_expires, now)) {
347 			if (req->status == SCLP_REQ_QUEUED) {
348 				req->status = SCLP_REQ_QUEUED_TIMEOUT;
349 				list_del(&req->list);
350 				goto out;
351 			}
352 		}
353 	}
354 	req = NULL;
355 out:
356 	spin_unlock_irqrestore(&sclp_lock, flags);
357 	return req;
358 }
359 
360 /*
361  * Timeout handler for queued requests. Removes request from list and
362  * invokes callback. This timer can be set per request in situations where
363  * waiting too long would be harmful to the system, e.g. during SE reboot.
364  */
365 static void sclp_req_queue_timeout(struct timer_list *unused)
366 {
367 	unsigned long flags, expires_next;
368 	struct sclp_req *req;
369 
370 	do {
371 		req = __sclp_req_queue_remove_expired_req();
372 
373 		if (req) {
374 			/* RQTM: Request timed out (a=sccb, b=summary) */
375 			sclp_trace_req(2, "RQTM", req, true);
376 		}
377 
378 		if (req && req->callback)
379 			req->callback(req, req->callback_data);
380 	} while (req);
381 
382 	spin_lock_irqsave(&sclp_lock, flags);
383 	expires_next = __sclp_req_queue_find_next_timeout();
384 	if (expires_next)
385 		mod_timer(&sclp_queue_timer, expires_next);
386 	spin_unlock_irqrestore(&sclp_lock, flags);
387 }
388 
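/* Issue a service call and trace it before (SRV1) and after (SRV2) the call.
 * On success, remember the command as the currently active one. */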
389 static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb)
390 {
391 	static u64 srvc_count;
392 	int rc;
393 
394 	/* SRV1: Service call about to be issued (a=command, b=sccb address) */
395 	sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);
396 
397 	rc = sclp_service_call(command, sccb);
398 
399 	/* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
400 	sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);
401 
402 	if (rc == 0)
403 		active_cmd = command;
404 
405 	return rc;
406 }
407 
408 /* Try to start a request. Return zero if the request was successfully
409  * started or if it will be started at a later time. Return non-zero otherwise.
410  * Called while sclp_lock is locked. */
411 static int
412 __sclp_start_request(struct sclp_req *req)
413 {
414 	int rc;
415 
416 	if (sclp_running_state != sclp_running_state_idle)
417 		return 0;
418 	timer_delete(&sclp_request_timer);
419 	rc = sclp_service_call_trace(req->command, req->sccb);
420 	req->start_count++;
421 
422 	if (rc == 0) {
423 		/* Successfully started request */
424 		req->status = SCLP_REQ_RUNNING;
425 		sclp_running_state = sclp_running_state_running;
426 		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
427 					 sclp_request_timeout_restart);
428 		return 0;
429 	} else if (rc == -EBUSY) {
430 		/* Try again later */
431 		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
432 					 sclp_request_timeout_normal);
433 		return 0;
434 	}
435 	/* Request failed */
436 	req->status = SCLP_REQ_FAILED;
437 	return rc;
438 }
439 
440 /* Try to start queued requests. */
441 static void
442 sclp_process_queue(void)
443 {
444 	struct sclp_req *req;
445 	int rc;
446 	unsigned long flags;
447 
448 	spin_lock_irqsave(&sclp_lock, flags);
449 	if (sclp_running_state != sclp_running_state_idle) {
450 		spin_unlock_irqrestore(&sclp_lock, flags);
451 		return;
452 	}
453 	timer_delete(&sclp_request_timer);
454 	while (!list_empty(&sclp_req_queue)) {
455 		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
456 		rc = __sclp_start_request(req);
457 		if (rc == 0)
458 			break;
459 		/* Request failed */
460 		if (req->start_count > 1) {
461 			/* Cannot abort already submitted request - could still
462 			 * be active at the SCLP */
463 			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
464 						 sclp_request_timeout_normal);
465 			break;
466 		}
467 		/* Post-processing for aborted request */
468 		list_del(&req->list);
469 
470 		/* RQAB: Request aborted (a=sccb, b=summary) */
471 		sclp_trace_req(2, "RQAB", req, true);
472 
473 		if (req->callback) {
474 			spin_unlock_irqrestore(&sclp_lock, flags);
475 			req->callback(req, req->callback_data);
476 			spin_lock_irqsave(&sclp_lock, flags);
477 		}
478 	}
479 	spin_unlock_irqrestore(&sclp_lock, flags);
480 }
481 
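/* Check whether a request may be added in the current driver state. The init
 * request itself is always accepted. */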
482 static int __sclp_can_add_request(struct sclp_req *req)
483 {
484 	if (req == &sclp_init_req)
485 		return 1;
486 	if (sclp_init_state != sclp_init_state_initialized)
487 		return 0;
488 	if (sclp_activation_state != sclp_activation_state_active)
489 		return 0;
490 	return 1;
491 }
492 
493 /* Queue a new request. Return zero on success, non-zero otherwise. */
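/* Callers are expected to fill in at least command and sccb, and optionally
 * callback/callback_data, before queueing; roughly (illustrative sketch only,
 * my_callback is a placeholder):
 *
 *	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
 *	req->sccb = sccb;
 *	req->status = SCLP_REQ_FILLED;
 *	req->callback = my_callback;
 *	rc = sclp_add_request(req);
 */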
494 int
495 sclp_add_request(struct sclp_req *req)
496 {
497 	unsigned long flags;
498 	int rc;
499 
500 	spin_lock_irqsave(&sclp_lock, flags);
501 	if (!__sclp_can_add_request(req)) {
502 		spin_unlock_irqrestore(&sclp_lock, flags);
503 		return -EIO;
504 	}
505 
506 	/* RQAD: Request was added (a=sccb, b=caller) */
507 	sclp_trace(2, "RQAD", __pa(req->sccb), _RET_IP_, false);
508 
509 	req->status = SCLP_REQ_QUEUED;
510 	req->start_count = 0;
511 	list_add_tail(&req->list, &sclp_req_queue);
512 	rc = 0;
513 	if (req->queue_timeout) {
514 		req->queue_expires = jiffies + req->queue_timeout * HZ;
515 		if (!timer_pending(&sclp_queue_timer) ||
516 		    time_after(sclp_queue_timer.expires, req->queue_expires))
517 			mod_timer(&sclp_queue_timer, req->queue_expires);
518 	} else
519 		req->queue_expires = 0;
520 	/* Start if request is first in list */
521 	if (sclp_running_state == sclp_running_state_idle &&
522 	    req->list.prev == &sclp_req_queue) {
523 		rc = __sclp_start_request(req);
524 		if (rc)
525 			list_del(&req->list);
526 	}
527 	spin_unlock_irqrestore(&sclp_lock, flags);
528 	return rc;
529 }
530 
531 EXPORT_SYMBOL(sclp_add_request);
532 
533 /* Dispatch events found in request buffer to registered listeners. Return 0
534  * if all events were dispatched, non-zero otherwise. */
535 static int
536 sclp_dispatch_evbufs(struct sccb_header *sccb)
537 {
538 	unsigned long flags;
539 	struct evbuf_header *evbuf;
540 	struct list_head *l;
541 	struct sclp_register *reg;
542 	int offset;
543 	int rc;
544 
545 	spin_lock_irqsave(&sclp_lock, flags);
546 	rc = 0;
547 	for (offset = sizeof(struct sccb_header); offset < sccb->length;
548 	     offset += evbuf->length) {
549 		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
550 		/* Check for malformed hardware response */
551 		if (evbuf->length == 0)
552 			break;
553 		/* Search for event handler */
554 		reg = NULL;
555 		list_for_each(l, &sclp_reg_list) {
556 			reg = list_entry(l, struct sclp_register, list);
557 			if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
558 				break;
559 			else
560 				reg = NULL;
561 		}
562 
563 		/* EVNT: Event callback (b=receiver) */
564 		sclp_trace_evbuf(2, "EVNT", 0, reg ? (u64)reg->receiver_fn : 0,
565 				 evbuf, !reg);
566 
567 		if (reg && reg->receiver_fn) {
568 			spin_unlock_irqrestore(&sclp_lock, flags);
569 			reg->receiver_fn(evbuf);
570 			spin_lock_irqsave(&sclp_lock, flags);
571 		} else if (reg == NULL)
572 			rc = -EOPNOTSUPP;
573 	}
574 	spin_unlock_irqrestore(&sclp_lock, flags);
575 	return rc;
576 }
577 
578 /* Read event data request callback. */
579 static void
580 sclp_read_cb(struct sclp_req *req, void *data)
581 {
582 	unsigned long flags;
583 	struct sccb_header *sccb;
584 
585 	sccb = (struct sccb_header *) req->sccb;
586 	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
587 	    sccb->response_code == 0x220))
588 		sclp_dispatch_evbufs(sccb);
589 	spin_lock_irqsave(&sclp_lock, flags);
590 	sclp_reading_state = sclp_reading_state_idle;
591 	spin_unlock_irqrestore(&sclp_lock, flags);
592 }
593 
594 /* Prepare read event data request. Called while sclp_lock is locked. */
595 static void __sclp_make_read_req(void)
596 {
597 	struct sccb_header *sccb;
598 
599 	sccb = (struct sccb_header *) sclp_read_sccb;
600 	clear_page(sccb);
601 	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
602 	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
603 	sclp_read_req.status = SCLP_REQ_QUEUED;
604 	sclp_read_req.start_count = 0;
605 	sclp_read_req.callback = sclp_read_cb;
606 	sclp_read_req.sccb = sccb;
607 	sccb->length = PAGE_SIZE;
608 	sccb->function_code = 0;
609 	sccb->control_mask[2] = 0x80;
610 }
611 
612 /* Search request list for request with matching sccb. Return request if found,
613  * NULL otherwise. Called while sclp_lock is locked. */
614 static inline struct sclp_req *
615 __sclp_find_req(u32 sccb)
616 {
617 	struct list_head *l;
618 	struct sclp_req *req;
619 
620 	list_for_each(l, &sclp_req_queue) {
621 		req = list_entry(l, struct sclp_req, list);
622 		if (sccb == __pa(req->sccb))
623 			return req;
624 	}
625 	return NULL;
626 }
627 
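/* Check whether an SCCB completion looks successful: the low byte of the
 * response code must be 0x10 or 0x20, and outgoing events must carry the
 * event-processed flag. */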
628 static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
629 {
630 	struct sccb_header *sccb = sclpint_to_sccb(sccb_int);
631 	struct evbuf_header *evbuf;
632 	u16 response;
633 
634 	if (!sccb)
635 		return true;
636 
637 	/* Check SCCB response. */
638 	response = sccb->response_code & 0xff;
639 	if (response != 0x10 && response != 0x20)
640 		return false;
641 
642 	/* Check event-processed flag on outgoing events. */
643 	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) {
644 		evbuf = (struct evbuf_header *)(sccb + 1);
645 		if (!(evbuf->flags & 0x80))
646 			return false;
647 	}
648 
649 	return true;
650 }
651 
652 /* Handler for external interruption. Perform request post-processing.
653  * Prepare read event data request if necessary. Start processing of next
654  * request on queue. */
655 static void sclp_interrupt_handler(struct ext_code ext_code,
656 				   unsigned int param32, unsigned long param64)
657 {
658 	struct sclp_req *req;
659 	u32 finished_sccb;
660 	u32 evbuf_pending;
661 
662 	inc_irq_stat(IRQEXT_SCP);
663 	spin_lock(&sclp_lock);
664 	finished_sccb = param32 & 0xfffffff8;
665 	evbuf_pending = param32 & 0x3;
666 
667 	/* INT: Interrupt received (a=intparm, b=cmd) */
668 	sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
669 			sclpint_to_sccb(finished_sccb),
670 			!ok_response(finished_sccb, active_cmd));
671 
672 	if (finished_sccb) {
673 		timer_delete(&sclp_request_timer);
674 		sclp_running_state = sclp_running_state_reset_pending;
675 		req = __sclp_find_req(finished_sccb);
676 		if (req) {
677 			/* Request post-processing */
678 			list_del(&req->list);
679 			req->status = SCLP_REQ_DONE;
680 
681 			/* RQOK: Request success (a=sccb, b=summary) */
682 			sclp_trace_req(2, "RQOK", req, false);
683 
684 			if (req->callback) {
685 				spin_unlock(&sclp_lock);
686 				req->callback(req, req->callback_data);
687 				spin_lock(&sclp_lock);
688 			}
689 		} else {
690 			/* UNEX: Unexpected SCCB completion (a=sccb address) */
691 			sclp_trace(0, "UNEX", finished_sccb, 0, true);
692 		}
693 		sclp_running_state = sclp_running_state_idle;
694 		active_cmd = 0;
695 	}
696 	if (evbuf_pending &&
697 	    sclp_activation_state == sclp_activation_state_active)
698 		__sclp_queue_read_req();
699 	spin_unlock(&sclp_lock);
700 	sclp_process_queue();
701 }
702 
703 /* Convert interval in jiffies to TOD ticks. */
704 static inline u64
705 sclp_tod_from_jiffies(unsigned long jiffies)
706 {
707 	return (u64) (jiffies / HZ) << 32;
708 }
709 
710 /* Wait until a currently running request has finished. Note: while this function
711  * is running, no timers are served on the calling CPU. */
712 void
713 sclp_sync_wait(void)
714 {
715 	unsigned long long old_tick;
716 	struct ctlreg cr0, cr0_sync;
717 	unsigned long flags;
718 	static u64 sync_count;
719 	u64 timeout;
720 	int irq_context;
721 
722 	/* SYN1: Synchronous wait start (a=runstate, b=sync count) */
723 	sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false);
724 
725 	/* We'll be disabling timer interrupts, so we need a custom timeout
726 	 * mechanism */
727 	timeout = 0;
728 	if (timer_pending(&sclp_request_timer)) {
729 		/* Get timeout TOD value */
730 		timeout = get_tod_clock_monotonic() +
731 			  sclp_tod_from_jiffies(sclp_request_timer.expires -
732 						jiffies);
733 	}
734 	local_irq_save(flags);
735 	/* Prevent bottom half from executing once we force interrupts open */
736 	irq_context = in_interrupt();
737 	if (!irq_context)
738 		local_bh_disable();
739 	/* Enable service-signal interruption, disable timer interrupts */
740 	old_tick = local_tick_disable();
741 	trace_hardirqs_on();
742 	local_ctl_store(0, &cr0);
743 	cr0_sync.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
744 	cr0_sync.val |= 1UL << (63 - 54);
745 	local_ctl_load(0, &cr0_sync);
746 	arch_local_irq_enable_external();
747 	/* Loop until driver state indicates finished request */
748 	while (sclp_running_state != sclp_running_state_idle) {
749 		/* Check for expired request timer */
750 		if (get_tod_clock_monotonic() > timeout && timer_delete(&sclp_request_timer))
751 			sclp_request_timer.function(&sclp_request_timer);
752 		cpu_relax();
753 	}
754 	local_irq_disable();
755 	local_ctl_load(0, &cr0);
756 	if (!irq_context)
757 		_local_bh_enable();
758 	local_tick_enable(old_tick);
759 	local_irq_restore(flags);
760 
761 	/* SYN2: Synchronous wait end (a=runstate, b=sync_count) */
762 	sclp_trace(4, "SYN2", sclp_running_state, sync_count, false);
763 }
764 EXPORT_SYMBOL(sclp_sync_wait);
765 
766 /* Dispatch changes in send and receive mask to registered listeners. */
767 static void
768 sclp_dispatch_state_change(void)
769 {
770 	struct list_head *l;
771 	struct sclp_register *reg;
772 	unsigned long flags;
773 	sccb_mask_t receive_mask;
774 	sccb_mask_t send_mask;
775 
776 	do {
777 		spin_lock_irqsave(&sclp_lock, flags);
778 		reg = NULL;
779 		list_for_each(l, &sclp_reg_list) {
780 			reg = list_entry(l, struct sclp_register, list);
781 			receive_mask = reg->send_mask & sclp_receive_mask;
782 			send_mask = reg->receive_mask & sclp_send_mask;
783 			if (reg->sclp_receive_mask != receive_mask ||
784 			    reg->sclp_send_mask != send_mask) {
785 				reg->sclp_receive_mask = receive_mask;
786 				reg->sclp_send_mask = send_mask;
787 				break;
788 			} else
789 				reg = NULL;
790 		}
791 		spin_unlock_irqrestore(&sclp_lock, flags);
792 		if (reg && reg->state_change_fn) {
793 			/* STCG: State-change callback (b=callback) */
794 			sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn,
795 				   false);
796 
797 			reg->state_change_fn(reg);
798 		}
799 	} while (reg);
800 }
801 
802 struct sclp_statechangebuf {
803 	struct evbuf_header	header;
804 	u8		validity_sclp_active_facility_mask : 1;
805 	u8		validity_sclp_receive_mask : 1;
806 	u8		validity_sclp_send_mask : 1;
807 	u8		validity_read_data_function_mask : 1;
808 	u16		_zeros : 12;
809 	u16		mask_length;
810 	u64		sclp_active_facility_mask;
811 	u8		masks[2 * 1021 + 4];	/* variable length */
812 	/*
813 	 * u8		sclp_receive_mask[mask_length];
814 	 * u8		sclp_send_mask[mask_length];
815 	 * u32		read_data_function_mask;
816 	 */
817 } __attribute__((packed));
818 
819 
820 /* State change event callback. Inform listeners of changes. */
821 static void
822 sclp_state_change_cb(struct evbuf_header *evbuf)
823 {
824 	unsigned long flags;
825 	struct sclp_statechangebuf *scbuf;
826 
827 	BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);
828 
829 	scbuf = (struct sclp_statechangebuf *) evbuf;
830 	spin_lock_irqsave(&sclp_lock, flags);
831 	if (scbuf->validity_sclp_receive_mask)
832 		sclp_receive_mask = sccb_get_recv_mask(scbuf);
833 	if (scbuf->validity_sclp_send_mask)
834 		sclp_send_mask = sccb_get_send_mask(scbuf);
835 	spin_unlock_irqrestore(&sclp_lock, flags);
836 	if (scbuf->validity_sclp_active_facility_mask)
837 		sclp.facilities = scbuf->sclp_active_facility_mask;
838 	sclp_dispatch_state_change();
839 }
840 
841 static struct sclp_register sclp_state_change_event = {
842 	.receive_mask = EVTYP_STATECHANGE_MASK,
843 	.receiver_fn = sclp_state_change_cb
844 };
845 
846 /* Calculate receive and send mask of currently registered listeners.
847  * Called while sclp_lock is locked. */
848 static inline void
849 __sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
850 {
851 	struct list_head *l;
852 	struct sclp_register *t;
853 
854 	*receive_mask = 0;
855 	*send_mask = 0;
856 	list_for_each(l, &sclp_reg_list) {
857 		t = list_entry(l, struct sclp_register, list);
858 		*receive_mask |= t->receive_mask;
859 		*send_mask |= t->send_mask;
860 	}
861 }
862 
863 /* Register event listener. Return 0 on success, non-zero otherwise. */
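/* Illustrative sketch of a caller (not taken from an in-tree driver; the
 * event type is an example only):
 *
 *	static void my_receiver(struct evbuf_header *evbuf) { ... }
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = SCLP_EVTYP_MASK(EVTYP_MSG),
 *		.receiver_fn  = my_receiver,
 *	};
 *
 *	rc = sclp_register(&my_listener);
 *
 * The masks are checked for collisions with already registered listeners and
 * a write-event-mask request announces the combined masks to the SCLP. */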
864 int
865 sclp_register(struct sclp_register *reg)
866 {
867 	unsigned long flags;
868 	sccb_mask_t receive_mask;
869 	sccb_mask_t send_mask;
870 	int rc;
871 
872 	/* REG: Event listener registered (b=caller) */
873 	sclp_trace_register(2, "REG", 0, _RET_IP_, reg);
874 
875 	rc = sclp_init();
876 	if (rc)
877 		return rc;
878 	spin_lock_irqsave(&sclp_lock, flags);
879 	/* Check event mask for collisions */
880 	__sclp_get_mask(&receive_mask, &send_mask);
881 	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
882 		spin_unlock_irqrestore(&sclp_lock, flags);
883 		return -EBUSY;
884 	}
885 	/* Trigger initial state change callback */
886 	reg->sclp_receive_mask = 0;
887 	reg->sclp_send_mask = 0;
888 	list_add(&reg->list, &sclp_reg_list);
889 	spin_unlock_irqrestore(&sclp_lock, flags);
890 	rc = sclp_init_mask(1);
891 	if (rc) {
892 		spin_lock_irqsave(&sclp_lock, flags);
893 		list_del(&reg->list);
894 		spin_unlock_irqrestore(&sclp_lock, flags);
895 	}
896 	return rc;
897 }
898 
899 EXPORT_SYMBOL(sclp_register);
900 
901 /* Unregister event listener. */
902 void
903 sclp_unregister(struct sclp_register *reg)
904 {
905 	unsigned long flags;
906 
907 	/* UREG: Event listener unregistered (b=caller) */
908 	sclp_trace_register(2, "UREG", 0, _RET_IP_, reg);
909 
910 	spin_lock_irqsave(&sclp_lock, flags);
911 	list_del(&reg->list);
912 	spin_unlock_irqrestore(&sclp_lock, flags);
913 	sclp_init_mask(1);
914 }
915 
916 EXPORT_SYMBOL(sclp_unregister);
917 
918 /* Remove event buffers which are marked processed. Return the number of
919  * remaining event buffers. */
920 int
921 sclp_remove_processed(struct sccb_header *sccb)
922 {
923 	struct evbuf_header *evbuf;
924 	int unprocessed;
925 	u16 remaining;
926 
927 	evbuf = (struct evbuf_header *) (sccb + 1);
928 	unprocessed = 0;
929 	remaining = sccb->length - sizeof(struct sccb_header);
930 	while (remaining > 0) {
931 		remaining -= evbuf->length;
932 		if (evbuf->flags & 0x80) {
933 			sccb->length -= evbuf->length;
934 			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
935 			       remaining);
936 		} else {
937 			unprocessed++;
938 			evbuf = (struct evbuf_header *)
939 					((addr_t) evbuf + evbuf->length);
940 		}
941 	}
942 	return unprocessed;
943 }
944 
945 EXPORT_SYMBOL(sclp_remove_processed);
946 
947 /* Prepare init mask request. Called while sclp_lock is locked. */
948 static inline void
949 __sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
950 {
951 	struct init_sccb *sccb = sclp_init_sccb;
952 
953 	clear_page(sccb);
954 	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
955 	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
956 	sclp_init_req.status = SCLP_REQ_FILLED;
957 	sclp_init_req.start_count = 0;
958 	sclp_init_req.callback = NULL;
959 	sclp_init_req.callback_data = NULL;
960 	sclp_init_req.sccb = sccb;
961 	sccb->header.length = sizeof(*sccb);
962 	if (sclp_mask_compat_mode)
963 		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
964 	else
965 		sccb->mask_length = sizeof(sccb_mask_t);
966 	sccb_set_recv_mask(sccb, receive_mask);
967 	sccb_set_send_mask(sccb, send_mask);
968 	sccb_set_sclp_recv_mask(sccb, 0);
969 	sccb_set_sclp_send_mask(sccb, 0);
970 }
971 
972 /* Start init mask request. If calculate is non-zero, calculate the mask as
973  * requested by registered listeners. Use zero mask otherwise. Return 0 on
974  * success, non-zero otherwise. */
975 static int
976 sclp_init_mask(int calculate)
977 {
978 	unsigned long flags;
979 	struct init_sccb *sccb = sclp_init_sccb;
980 	sccb_mask_t receive_mask;
981 	sccb_mask_t send_mask;
982 	int retry;
983 	int rc;
984 	unsigned long wait;
985 
986 	spin_lock_irqsave(&sclp_lock, flags);
987 	/* Check if interface is in appropriate state */
988 	if (sclp_mask_state != sclp_mask_state_idle) {
989 		spin_unlock_irqrestore(&sclp_lock, flags);
990 		return -EBUSY;
991 	}
992 	if (sclp_activation_state == sclp_activation_state_inactive) {
993 		spin_unlock_irqrestore(&sclp_lock, flags);
994 		return -EINVAL;
995 	}
996 	sclp_mask_state = sclp_mask_state_initializing;
997 	/* Determine mask */
998 	if (calculate)
999 		__sclp_get_mask(&receive_mask, &send_mask);
1000 	else {
1001 		receive_mask = 0;
1002 		send_mask = 0;
1003 	}
1004 	rc = -EIO;
1005 	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
1006 		/* Prepare request */
1007 		__sclp_make_init_req(receive_mask, send_mask);
1008 		spin_unlock_irqrestore(&sclp_lock, flags);
1009 		if (sclp_add_request(&sclp_init_req)) {
1010 			/* Try again later */
1011 			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
1012 			while (time_before(jiffies, wait))
1013 				sclp_sync_wait();
1014 			spin_lock_irqsave(&sclp_lock, flags);
1015 			continue;
1016 		}
1017 		while (sclp_init_req.status != SCLP_REQ_DONE &&
1018 		       sclp_init_req.status != SCLP_REQ_FAILED)
1019 			sclp_sync_wait();
1020 		spin_lock_irqsave(&sclp_lock, flags);
1021 		if (sclp_init_req.status == SCLP_REQ_DONE &&
1022 		    sccb->header.response_code == 0x20) {
1023 			/* Successful request */
1024 			if (calculate) {
1025 				sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
1026 				sclp_send_mask = sccb_get_sclp_send_mask(sccb);
1027 			} else {
1028 				sclp_receive_mask = 0;
1029 				sclp_send_mask = 0;
1030 			}
1031 			spin_unlock_irqrestore(&sclp_lock, flags);
1032 			sclp_dispatch_state_change();
1033 			spin_lock_irqsave(&sclp_lock, flags);
1034 			rc = 0;
1035 			break;
1036 		}
1037 	}
1038 	sclp_mask_state = sclp_mask_state_idle;
1039 	spin_unlock_irqrestore(&sclp_lock, flags);
1040 	return rc;
1041 }
1042 
1043 /* Deactivate SCLP interface. On success, new requests will be rejected,
1044  * events will no longer be dispatched. Return 0 on success, non-zero
1045  * otherwise. */
1046 int
1047 sclp_deactivate(void)
1048 {
1049 	unsigned long flags;
1050 	int rc;
1051 
1052 	spin_lock_irqsave(&sclp_lock, flags);
1053 	/* Deactivate can only be called when active */
1054 	if (sclp_activation_state != sclp_activation_state_active) {
1055 		spin_unlock_irqrestore(&sclp_lock, flags);
1056 		return -EINVAL;
1057 	}
1058 	sclp_activation_state = sclp_activation_state_deactivating;
1059 	spin_unlock_irqrestore(&sclp_lock, flags);
1060 	rc = sclp_init_mask(0);
1061 	spin_lock_irqsave(&sclp_lock, flags);
1062 	if (rc == 0)
1063 		sclp_activation_state = sclp_activation_state_inactive;
1064 	else
1065 		sclp_activation_state = sclp_activation_state_active;
1066 	spin_unlock_irqrestore(&sclp_lock, flags);
1067 	return rc;
1068 }
1069 
1070 EXPORT_SYMBOL(sclp_deactivate);
1071 
1072 /* Reactivate SCLP interface after sclp_deactivate. On success, new
1073  * requests will be accepted, events will be dispatched again. Return 0 on
1074  * success, non-zero otherwise. */
1075 int
1076 sclp_reactivate(void)
1077 {
1078 	unsigned long flags;
1079 	int rc;
1080 
1081 	spin_lock_irqsave(&sclp_lock, flags);
1082 	/* Reactivate can only be called when inactive */
1083 	if (sclp_activation_state != sclp_activation_state_inactive) {
1084 		spin_unlock_irqrestore(&sclp_lock, flags);
1085 		return -EINVAL;
1086 	}
1087 	sclp_activation_state = sclp_activation_state_activating;
1088 	spin_unlock_irqrestore(&sclp_lock, flags);
1089 	rc = sclp_init_mask(1);
1090 	spin_lock_irqsave(&sclp_lock, flags);
1091 	if (rc == 0)
1092 		sclp_activation_state = sclp_activation_state_active;
1093 	else
1094 		sclp_activation_state = sclp_activation_state_inactive;
1095 	spin_unlock_irqrestore(&sclp_lock, flags);
1096 	return rc;
1097 }
1098 
1099 EXPORT_SYMBOL(sclp_reactivate);
1100 
1101 /* Handler for external interruption used during initialization. Modify
1102  * request state to done. */
1103 static void sclp_check_handler(struct ext_code ext_code,
1104 			       unsigned int param32, unsigned long param64)
1105 {
1106 	u32 finished_sccb;
1107 
1108 	inc_irq_stat(IRQEXT_SCP);
1109 	finished_sccb = param32 & 0xfffffff8;
1110 	/* Is this the interrupt we are waiting for? */
1111 	if (finished_sccb == 0)
1112 		return;
1113 	if (finished_sccb != __pa(sclp_init_sccb))
1114 		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
1115 		      finished_sccb);
1116 	spin_lock(&sclp_lock);
1117 	if (sclp_running_state == sclp_running_state_running) {
1118 		sclp_init_req.status = SCLP_REQ_DONE;
1119 		sclp_running_state = sclp_running_state_idle;
1120 	}
1121 	spin_unlock(&sclp_lock);
1122 }
1123 
1124 /* Initial init mask request timed out. Modify request state to failed. */
1125 static void
1126 sclp_check_timeout(struct timer_list *unused)
1127 {
1128 	unsigned long flags;
1129 
1130 	spin_lock_irqsave(&sclp_lock, flags);
1131 	if (sclp_running_state == sclp_running_state_running) {
1132 		sclp_init_req.status = SCLP_REQ_FAILED;
1133 		sclp_running_state = sclp_running_state_idle;
1134 	}
1135 	spin_unlock_irqrestore(&sclp_lock, flags);
1136 }
1137 
1138 /* Perform a check of the SCLP interface. Return zero if the interface is
1139  * available and there are no pending requests from a previous instance.
1140  * Return non-zero otherwise. */
1141 static int
1142 sclp_check_interface(void)
1143 {
1144 	struct init_sccb *sccb;
1145 	unsigned long flags;
1146 	int retry;
1147 	int rc;
1148 
1149 	spin_lock_irqsave(&sclp_lock, flags);
1150 	/* Prepare init mask command */
1151 	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
1152 	if (rc) {
1153 		spin_unlock_irqrestore(&sclp_lock, flags);
1154 		return rc;
1155 	}
1156 	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
1157 		__sclp_make_init_req(0, 0);
1158 		sccb = (struct init_sccb *) sclp_init_req.sccb;
1159 		rc = sclp_service_call_trace(sclp_init_req.command, sccb);
1160 		if (rc == -EIO)
1161 			break;
1162 		sclp_init_req.status = SCLP_REQ_RUNNING;
1163 		sclp_running_state = sclp_running_state_running;
1164 		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
1165 					 sclp_check_timeout);
1166 		spin_unlock_irqrestore(&sclp_lock, flags);
1167 		/* Enable service-signal interruption - needs to happen
1168 		 * with IRQs enabled. */
1169 		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
1170 		/* Wait for signal from interrupt or timeout */
1171 		sclp_sync_wait();
1172 		/* Disable service-signal interruption - needs to happen
1173 		 * with IRQs enabled. */
1174 		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
1175 		spin_lock_irqsave(&sclp_lock, flags);
1176 		timer_delete(&sclp_request_timer);
1177 		rc = -EBUSY;
1178 		if (sclp_init_req.status == SCLP_REQ_DONE) {
1179 			if (sccb->header.response_code == 0x20) {
1180 				rc = 0;
1181 				break;
1182 			} else if (sccb->header.response_code == 0x74f0) {
1183 				if (!sclp_mask_compat_mode) {
1184 					sclp_mask_compat_mode = true;
1185 					retry = 0;
1186 				}
1187 			}
1188 		}
1189 	}
1190 	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
1191 	spin_unlock_irqrestore(&sclp_lock, flags);
1192 	return rc;
1193 }
1194 
1195 /* Reboot event handler. Reset send and receive mask to prevent pending SCLP
1196  * events from interfering with the rebooted system. */
1197 static int
1198 sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
1199 {
1200 	sclp_deactivate();
1201 	return NOTIFY_DONE;
1202 }
1203 
1204 static struct notifier_block sclp_reboot_notifier = {
1205 	.notifier_call = sclp_reboot_event,
1206 	.priority      = INT_MIN,
1207 };
1208 
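/* Driver attributes exposing the console page count, drop behaviour and the
 * buffer-full counter via sysfs. */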
1209 static ssize_t con_pages_show(struct device_driver *dev, char *buf)
1210 {
1211 	return sysfs_emit(buf, "%i\n", sclp_console_pages);
1212 }
1213 
1214 static DRIVER_ATTR_RO(con_pages);
1215 
1216 static ssize_t con_drop_store(struct device_driver *dev, const char *buf, size_t count)
1217 {
1218 	int rc;
1219 
1220 	rc = kstrtobool(buf, &sclp_console_drop);
1221 	return rc ?: count;
1222 }
1223 
1224 static ssize_t con_drop_show(struct device_driver *dev, char *buf)
1225 {
1226 	return sysfs_emit(buf, "%i\n", sclp_console_drop);
1227 }
1228 
1229 static DRIVER_ATTR_RW(con_drop);
1230 
1231 static ssize_t con_full_show(struct device_driver *dev, char *buf)
1232 {
1233 	return sysfs_emit(buf, "%lu\n", sclp_console_full);
1234 }
1235 
1236 static DRIVER_ATTR_RO(con_full);
1237 
1238 static struct attribute *sclp_drv_attrs[] = {
1239 	&driver_attr_con_pages.attr,
1240 	&driver_attr_con_drop.attr,
1241 	&driver_attr_con_full.attr,
1242 	NULL,
1243 };
1244 static struct attribute_group sclp_drv_attr_group = {
1245 	.attrs = sclp_drv_attrs,
1246 };
1247 static const struct attribute_group *sclp_drv_attr_groups[] = {
1248 	&sclp_drv_attr_group,
1249 	NULL,
1250 };
1251 
1252 static struct platform_driver sclp_pdrv = {
1253 	.driver = {
1254 		.name	= "sclp",
1255 		.groups = sclp_drv_attr_groups,
1256 	},
1257 };
1258 
1259 /* Initialize SCLP driver. Return zero if driver is operational, non-zero
1260  * otherwise. */
1261 int sclp_init(void)
1262 {
1263 	unsigned long flags;
1264 	int rc = 0;
1265 
1266 	spin_lock_irqsave(&sclp_lock, flags);
1267 	/* Check for previous or running initialization */
1268 	if (sclp_init_state != sclp_init_state_uninitialized)
1269 		goto fail_unlock;
1270 	sclp_init_state = sclp_init_state_initializing;
1271 	sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
1272 	sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
1273 	BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
1274 	/* Set up variables */
1275 	list_add(&sclp_state_change_event.list, &sclp_reg_list);
1276 	timer_setup(&sclp_request_timer, NULL, 0);
1277 	timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
1278 	/* Check interface */
1279 	spin_unlock_irqrestore(&sclp_lock, flags);
1280 	rc = sclp_check_interface();
1281 	spin_lock_irqsave(&sclp_lock, flags);
1282 	if (rc)
1283 		goto fail_init_state_uninitialized;
1284 	/* Register reboot handler */
1285 	rc = register_reboot_notifier(&sclp_reboot_notifier);
1286 	if (rc)
1287 		goto fail_init_state_uninitialized;
1288 	/* Register interrupt handler */
1289 	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
1290 	if (rc)
1291 		goto fail_unregister_reboot_notifier;
1292 	sclp_init_state = sclp_init_state_initialized;
1293 	spin_unlock_irqrestore(&sclp_lock, flags);
1294 	/* Enable service-signal external interruption - needs to happen with
1295 	 * IRQs enabled. */
1296 	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
1297 	sclp_init_mask(1);
1298 	return 0;
1299 
1300 fail_unregister_reboot_notifier:
1301 	unregister_reboot_notifier(&sclp_reboot_notifier);
1302 fail_init_state_uninitialized:
1303 	list_del(&sclp_state_change_event.list);
1304 	sclp_init_state = sclp_init_state_uninitialized;
1305 	free_page((unsigned long) sclp_read_sccb);
1306 	free_page((unsigned long) sclp_init_sccb);
1307 fail_unlock:
1308 	spin_unlock_irqrestore(&sclp_lock, flags);
1309 	return rc;
1310 }
1311 
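/* Register the platform driver that provides the sysfs attributes above. */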
1312 static __init int sclp_initcall(void)
1313 {
1314 	return platform_driver_register(&sclp_pdrv);
1315 }
1316 
1317 arch_initcall(sclp_initcall);
1318