// SPDX-License-Identifier: GPL-2.0
/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>, "
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
			       unsigned long out_mask, unsigned long in_mask,
			       unsigned int fc)
{
	int cc;

	asm volatile(
		"	lgr	0,%[fc]\n"
		"	lgr	1,%[schid]\n"
		"	lgr	2,%[out]\n"
		"	lgr	3,%[in]\n"
		"	siga	0\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=&d" (cc)
		: [fc] "d" (fc), [schid] "d" (schid),
		  [out] "d" (out_mask), [in] "d" (in_mask)
		: "cc", "0", "1", "2", "3");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned long mask,
				unsigned long fc)
{
	int cc;

	asm volatile(
		"	lgr	0,%[fc]\n"
		"	lgr	1,%[schid]\n"
		"	lgr	2,%[mask]\n"
		"	siga	0\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=&d" (cc)
		: [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
		: "cc", "0", "1", "2");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQDIO unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned long fc,
				 unsigned long aob)
{
	int cc;

	asm volatile(
		"	lgr	0,%[fc]\n"
		"	lgr	1,%[schid]\n"
		"	lgr	2,%[mask]\n"
		"	lgr	3,%[aob]\n"
		"	siga	0\n"
		"	lgr	%[fc],0\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=&d" (cc), [fc] "+&d" (fc)
		: [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
		: "cc", "0", "1", "2", "3");
	*bb = fc >> 31;
	return cc;
}
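
/*
 * A note on the SIGA wrappers above, derived from the inline assembly (an
 * explanatory summary, not additional API): each helper loads the function
 * code into general register 0, the subchannel id (or the QEBSM subchannel
 * token) into register 1 and the queue mask(s) into registers 2 and 3,
 * then extracts the condition code from the PSW via ipm/srl. SIGA-w
 * additionally returns status in register 0, which do_siga_output() copies
 * back into @fc to derive the busy bit.
 */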

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state differs from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int tmp_count = count, tmp_start = start, nr = q->nr;
	unsigned int ccq = 0;

	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or next buffer state different */
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
		return count - tmp_count;
	case 97:
		/* no buffer processed */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;

	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or active buffer adapter-owned */
		WARN_ON_ONCE(tmp_count);
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}

/*
 * Returns number of examined buffers and their common state in *state.
 * Requested number of buffers-to-examine must be > 0.
 */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i = 1;

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	/* get initial state: */
	__state = q->slsb.val[bufnr];

	/* Bail out early if there is no work on the queue: */
	if (__state & SLSB_OWNER_CU)
		goto out;

	for (; i < count; i++) {
		bufnr = next_buf(bufnr);

		/* stop if next state differs from initial state: */
		if (q->slsb.val[bufnr] != __state)
			break;
	}

out:
	*state = __state;
	return i;
}
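
/*
 * Illustration of the scan above (hypothetical SLSB contents, for the
 * non-QEBSM path): assume the queue holds the states
 *
 *	index:	10	11	12	13	14
 *	state:	PRIMED	PRIMED	PRIMED	EMPTY	EMPTY
 *
 * Then a call with bufnr = 10 and count = 5 stops at index 13 and returns
 * 3, with *state set to the PRIMED value. If the initial state has
 * SLSB_OWNER_CU set, the scan bails out after the first buffer.
 */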

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	/* Ensure that all preceding changes to the SBALs are visible: */
	mb();

	for (i = 0; i < count; i++) {
		WRITE_ONCE(q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}

	/* Make our SLSB changes visible: */
	mb();

	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
			  unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

static inline int qdio_sync_input_queue(struct qdio_q *q)
{
	return qdio_siga_sync(q, 0, q->mask);
}

static inline int qdio_sync_output_queue(struct qdio_q *q)
{
	return qdio_siga_sync(q, q->mask, 0);
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_sync_input_queue(q);
	else
		return qdio_sync_output_queue(q);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int count,
			    unsigned int *busy_bit, unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;

	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
		if (count > 1)
			fc = QDIO_SIGA_WRITEM;
		else if (aob)
			fc = QDIO_SIGA_WRITEQ;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);

	/* HiperSockets busy condition */
	if (unlikely(*busy_bit)) {
		retries++;

		if (!start_time) {
			start_time = get_tod_clock_fast();
			goto again;
		}
		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}
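
/*
 * Summary of the busy-bit handling above: on the first busy indication the
 * TOD clock is sampled and SIGA-w is reissued until QDIO_BUSY_BIT_PATIENCE
 * has elapsed. Only then does the busy condition reach the caller, where
 * qdio_kick_outbound_q() turns it into -EBUSY after further delayed
 * retries.
 */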

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (qdio_need_siga_sync(q->irq_ptr))
		qdio_siga_sync_q(q);
	return get_buf_state(q, bufnr, state, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.batch_count)
		return;

	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
		       q->u.in.batch_count);
	q->u.in.batch_count = 0;
}

static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
	q->q_stats.nr_sbal_total += count;
	q->q_stats.nr_sbals[ilog2(count)]++;
}

static void process_buffer_error(struct qdio_q *q, unsigned int start,
				 int count)
{
	/* special handling for no target buffer empty */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
	    q->sbal[start]->element[15].sflags == 0x10) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", start, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[start]->element[14].sflags,
		  q->sbal[start]->element[15].sflags);
}

static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
				       int count, bool auto_ack)
{
	/* ACK the newest SBAL: */
	if (!auto_ack)
		set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);

	if (!q->u.in.batch_count)
		q->u.in.batch_start = start;
	q->u.in.batch_count += count;
}

static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
				       unsigned int *error)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	if (qdio_need_siga_sync(q->irq_ptr))
		qdio_sync_input_queue(q);

	count = get_buf_states(q, start, &state, count, 1);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
			      count);

		inbound_handle_work(q, start, count, is_qebsm(q));
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_INPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
			      count);

		*error = QDIO_ERROR_SLSB_STATE;
		process_buffer_error(q, start, count);
		inbound_handle_work(q, start, count, false);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_INPUT_EMPTY:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
			      q->nr, start);
		return 0;
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		/* We should never see this state, throw a WARN: */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}

int qdio_inspect_input_queue(struct ccw_device *cdev, unsigned int nr,
			     unsigned int *bufnr, unsigned int *error)
{
	struct qdio_irq *irq = cdev->private->qdio_data;
	unsigned int start;
	struct qdio_q *q;
	int count;

	if (!irq)
		return -ENODEV;

	q = irq->input_qs[nr];
	start = q->first_to_check;
	*error = 0;

	count = get_inbound_buffer_frontier(q, start, error);
	if (count == 0)
		return 0;

	*bufnr = start;
	q->first_to_check = add_buf(start, count);
	return count;
}
EXPORT_SYMBOL_GPL(qdio_inspect_input_queue);
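
/*
 * Usage sketch (illustrative only; "my_cdev" and consume_sbals() are
 * hypothetical, not part of this API): a driver typically polls the Input
 * Queue and then returns the emptied buffers to the device:
 *
 *	unsigned int bufnr, error;
 *	int count;
 *
 *	count = qdio_inspect_input_queue(my_cdev, 0, &bufnr, &error);
 *	if (count > 0) {
 *		consume_sbals(bufnr, count, error);
 *		qdio_add_bufs_to_input_queue(my_cdev, 0, bufnr, count);
 *	}
 */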

static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (qdio_need_siga_sync(q->irq_ptr))
		qdio_sync_input_queue(q);
	get_buf_state(q, start, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	return 1;
}

static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
					unsigned int *error)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	if (qdio_need_siga_sync(q->irq_ptr))
		qdio_sync_output_queue(q);

	count = get_buf_states(q, start, &state, count, 0);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_OUTPUT_PENDING:
		*error = QDIO_ERROR_SLSB_PENDING;
		fallthrough;
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_OUTPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x",
			      q->nr, count);

		*error = QDIO_ERROR_SLSB_STATE;
		process_buffer_error(q, start, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		return 0;
	case SLSB_P_OUTPUT_HALTED:
		return 0;
	case SLSB_P_OUTPUT_NOT_INIT:
		/* We should never see this state, throw a WARN: */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}

int qdio_inspect_output_queue(struct ccw_device *cdev, unsigned int nr,
			      unsigned int *bufnr, unsigned int *error)
{
	struct qdio_irq *irq = cdev->private->qdio_data;
	unsigned int start;
	struct qdio_q *q;
	int count;

	if (!irq)
		return -ENODEV;

	q = irq->output_qs[nr];
	start = q->first_to_check;
	*error = 0;

	count = get_outbound_buffer_frontier(q, start, error);
	if (count == 0)
		return 0;

	*bufnr = start;
	q->first_to_check = add_buf(start, count);
	return count;
}
EXPORT_SYMBOL_GPL(qdio_inspect_output_queue);

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
				unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!qdio_need_siga_out(q->irq_ptr))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, count, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	qdio_deliver_irq(irq_ptr);
	irq_ptr->last_data_irq_time = S390_lowcore.int_clock;
}

static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
				       unsigned long intparm, int cstat,
				       int dstat)
{
	unsigned int first_to_check = 0;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	/* zfcp wants this: */
	if (irq_ptr->nr_input_qs)
		first_to_check = irq_ptr->input_qs[0]->first_to_check;

	irq_ptr->error_handler(irq_ptr->cdev, QDIO_ERROR_ACTIVATE, 0,
			       first_to_check, 0, irq_ptr->int_parm);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will happen.
	 * Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}

static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
				      int dstat)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		ccw_device_get_schid(cdev, &schid);
		DBF_ERROR("qint:%4x", schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		wake_up(&cdev->private->wait_q);
		return;
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(irq_ptr, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(irq_ptr, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	wake_up(&cdev->private->wait_q);
}
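
/*
 * Summary of the state handling above: while INACTIVE, interrupts belong
 * to the establish channel program and go to qdio_establish_handle_irq();
 * in CLEANUP an interrupt completes the transition back to INACTIVE; in
 * ESTABLISHED or ACTIVE a PCI is delivered to the upper layer, while any
 * other channel or device status is treated as an activate check and
 * stops the queues.
 */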

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	struct subchannel_id schid;

	if (!cdev || !cdev->private)
		return -EINVAL;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("get ssqd:%4x", schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
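
/*
 * Usage sketch (illustrative; "my_cdev" is hypothetical): callers usually
 * fetch the SSQD once, before qdio_establish(), to learn the subchannel's
 * characteristics:
 *
 *	struct qdio_ssqd_desc ssqd;
 *	int rc = qdio_get_ssqd_desc(my_cdev, &ssqd);
 *
 * and on success examine the fields of interest, e.g. ssqd.qfmt.
 */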

static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
{
	struct ccw_device *cdev = irq->cdev;
	long timeout;
	int rc;

	spin_lock_irq(get_ccwdev_lock(cdev));
	qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		return rc;
	}

	timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
						   irq->state == QDIO_IRQ_STATE_INACTIVE ||
						   irq->state == QDIO_IRQ_STATE_ERR,
						   10 * HZ);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	return rc;
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	WARN_ON_ONCE(irqs_disabled());
	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qshutdown:%4x", schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	qdio_shutdown_debug_entries(irq_ptr);

	rc = qdio_cancel_ccw(irq_ptr, how);
	qdio_shutdown_thinint(irq_ptr);
	qdio_shutdown_irq(irq_ptr);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
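
/*
 * Teardown-order note: a driver is expected to wind the subchannel down in
 * reverse order of bringup, i.e. qdio_shutdown() first and qdio_free()
 * afterwards. A minimal sketch ("my_cdev" is hypothetical):
 *
 *	qdio_shutdown(my_cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	qdio_free(my_cdev);
 */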

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;

	if (!irq_ptr)
		return -ENODEV;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qfree:%4x", schid.sch_no);
	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
	mutex_lock(&irq_ptr->setup_mutex);

	irq_ptr->debug_area = NULL;
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_free_queues(irq_ptr);
	free_page((unsigned long) irq_ptr->qdr);
	free_page(irq_ptr->chsc_page);
	kfree(irq_ptr->ccw);
	free_page((unsigned long) irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @cdev: associated ccw device
 * @no_input_qs: allocate this number of Input Queues
 * @no_output_qs: allocate this number of Output Queues
 */
int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
		  unsigned int no_output_qs)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc = -ENOMEM;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qallocate:%4x", schid.sch_no);

	if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
	    no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
		return -EINVAL;

	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr)
		return -ENOMEM;

	irq_ptr->ccw = kmalloc(sizeof(*irq_ptr->ccw), GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->ccw)
		goto err_ccw;

	/* kmemleak doesn't scan the page-allocated irq_ptr: */
	kmemleak_not_leak(irq_ptr->ccw);

	irq_ptr->cdev = cdev;
	mutex_init(&irq_ptr->setup_mutex);
	if (qdio_allocate_dbf(irq_ptr))
		goto err_dbf;

	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
		      no_output_qs);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto err_chsc;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto err_qdr;

	rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
	if (rc)
		goto err_queues;

	cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;

err_queues:
	free_page((unsigned long) irq_ptr->qdr);
err_qdr:
	free_page(irq_ptr->chsc_page);
err_chsc:
err_dbf:
	kfree(irq_ptr->ccw);
err_ccw:
	free_page((unsigned long) irq_ptr);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
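
/*
 * Lifecycle sketch (illustrative; "my_cdev" and "my_init_data" are
 * hypothetical): the allocation above is the first of three steps a driver
 * performs before the queues carry traffic:
 *
 *	rc = qdio_allocate(my_cdev, 1, 1);
 *	if (!rc)
 *		rc = qdio_establish(my_cdev, &my_init_data);
 *	if (!rc)
 *		rc = qdio_activate(my_cdev);
 *
 * where my_init_data is a struct qdio_initialize filled in by the caller.
 */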

static void qdio_trace_init_data(struct qdio_irq *irq,
				 struct qdio_initialize *data)
{
	DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
	DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
	DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
	DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
		      data->no_output_qs);
	DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
		    DBF_ERR);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @cdev: associated ccw device
 * @init_data: initialization data
 */
int qdio_establish(struct ccw_device *cdev,
		   struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	struct ciw *ciw;
	long timeout;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qestablish:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	if (init_data->no_input_qs > irq_ptr->max_input_qs ||
	    init_data->no_output_qs > irq_ptr->max_output_qs)
		return -EINVAL;

	/* Needed as error_handler: */
	if (!init_data->input_handler)
		return -EINVAL;

	if (init_data->no_output_qs && !init_data->output_handler)
		return -EINVAL;

	if (!init_data->input_sbal_addr_array ||
	    !init_data->output_sbal_addr_array)
		return -EINVAL;

	if (!init_data->irq_poll)
		return -EINVAL;

	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE);
	if (!ciw) {
		DBF_ERROR("%4x NO EQ", schid.sch_no);
		return -EIO;
	}

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_trace_init_data(irq_ptr, init_data);
	qdio_setup_irq(irq_ptr, init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc)
		goto err_thinint;

	/* establish q */
	irq_ptr->ccw->cmd_code = ciw->cmd;
	irq_ptr->ccw->flags = CCW_FLAG_SLI;
	irq_ptr->ccw->count = ciw->count;
	irq_ptr->ccw->cda = (u32) virt_to_phys(irq_ptr->qdr);

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		goto err_ccw_start;
	}

	timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
						   irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
						   irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
	if (timeout <= 0) {
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
		goto err_ccw_timeout;
	}

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		rc = -EIO;
		goto err_ccw_error;
	}

	qdio_setup_ssqd_info(irq_ptr);

	/* QEBSM is now set up if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr);
	qdio_setup_debug_entries(irq_ptr);
	return 0;

err_ccw_timeout:
	qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
err_ccw_error:
err_ccw_start:
	qdio_shutdown_thinint(irq_ptr);
err_thinint:
	qdio_shutdown_irq(irq_ptr);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	struct ciw *ciw;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qactivate:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE);
	if (!ciw) {
		DBF_ERROR("%4x NO AQ", schid.sch_no);
		return -EIO;
	}

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw->cmd_code = ciw->cmd;
	irq_ptr->ccw->flags = CCW_FLAG_SLI;
	irq_ptr->ccw->count = ciw->count;
	irq_ptr->ccw->cda = 0;

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		goto out;
	}

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, int bufnr, int count)
{
	int overlap;

	qperf_inc(q, inbound_call);

	/* If any processed SBALs are returned to HW, adjust our tracking: */
	overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
			     q->u.in.batch_count);
	if (overlap > 0) {
		q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
		q->u.in.batch_count -= overlap;
	}

	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	atomic_add(count, &q->nr_buf_used);

	if (qdio_need_siga_in(q->irq_ptr))
		return qdio_siga_input(q);

	return 0;
}
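
/*
 * Worked example for the overlap handling above (illustrative numbers):
 * assume batch_start = 5 and batch_count = 4, i.e. buffers 5..8 are held
 * as the polling batch. If the upper layer returns bufnr = 5 with
 * count = 2, then sub_buf(5, 5) = 0 and overlap = min(2 - 0, 4) = 2, so
 * the batch shrinks to batch_start = 7, batch_count = 2 before the two
 * buffers are handed back to the device.
 */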

/**
 * qdio_add_bufs_to_input_queue - process buffers on an Input Queue
 * @cdev: associated ccw_device for the qdio subchannel
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int qdio_add_bufs_to_input_queue(struct ccw_device *cdev, unsigned int q_nr,
				 unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addi b:%02x c:%02x", bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;

	return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count);
}
EXPORT_SYMBOL_GPL(qdio_add_bufs_to_input_queue);

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 * @aob: asynchronous operation block
 */
static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int count,
			   struct qaob *aob)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;

		WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
		rc = qdio_kick_outbound_q(q, count, phys_aob);
	} else if (qdio_need_siga_sync(q->irq_ptr)) {
		rc = qdio_sync_output_queue(q);
	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
		   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
		   state == SLSB_CU_OUTPUT_PRIMED) {
		/* The previous buffer is not processed yet, tack on. */
		qperf_inc(q, fast_requeue);
	} else {
		rc = qdio_kick_outbound_q(q, count, 0);
	}

	return rc;
}

/**
 * qdio_add_bufs_to_output_queue - process buffers on an Output Queue
 * @cdev: associated ccw_device for the qdio subchannel
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 * @aob: asynchronous operation block
 */
int qdio_add_bufs_to_output_queue(struct ccw_device *cdev, unsigned int q_nr,
				  unsigned int bufnr, unsigned int count,
				  struct qaob *aob)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addo b:%02x c:%02x", bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;

	return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob);
}
EXPORT_SYMBOL_GPL(qdio_add_bufs_to_output_queue);
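
/*
 * Usage sketch (illustrative; "my_cdev" is hypothetical): after filling
 * SBAL "bufnr" on Output Queue 0, a driver hands it to the device with
 *
 *	rc = qdio_add_bufs_to_output_queue(my_cdev, 0, bufnr, 1, NULL);
 *
 * passing a struct qaob instead of NULL when asynchronous completion
 * reporting is desired.
 */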

/**
 * qdio_start_irq - enable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	unsigned int i;

	if (!irq_ptr)
		return -ENODEV;

	for_each_input_queue(irq_ptr, q, i)
		qdio_stop_polling(q);

	clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;

	for_each_input_queue(irq_ptr, q, i) {
		if (!qdio_inbound_q_done(q, q->first_to_check))
			goto rescan;
	}

	return 0;

rescan:
	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_start_irq);
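
/*
 * Polling-mode sketch (illustrative; everything except the qdio_*() calls
 * is hypothetical): a driver's irq_poll callback typically disables further
 * interrupts with qdio_stop_irq(), drains the Input Queue, and re-arms
 * with qdio_start_irq():
 *
 *	qdio_stop_irq(my_cdev);
 * repoll:
 *	while ((count = qdio_inspect_input_queue(my_cdev, 0, &bufnr,
 *						 &error)) > 0)
 *		consume_sbals(bufnr, count, error);
 *	if (qdio_start_irq(my_cdev))
 *		goto repoll;	 (new data arrived in the meantime)
 */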

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = qdio_thinint_init();
	if (rc)
		goto out_cache;
	return 0;

out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	qdio_thinint_exit();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);