// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016, 2023
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/ap.h>

#include "ap_bus.h"
#include "ap_debug.h"

EXPORT_TRACEPOINT_SYMBOL(s390_ap_nqap);
EXPORT_TRACEPOINT_SYMBOL(s390_ap_dqap);

static void __ap_flush_queue(struct ap_queue *aq);

/*
 * some AP queue helper functions
 */

static inline bool ap_q_supported_in_se(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
}

static inline bool ap_q_supports_bind(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
}

static inline bool ap_q_supports_assoc(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11;
}

static inline bool ap_q_needs_bind(struct ap_queue *aq)
{
	return ap_q_supports_bind(aq) && ap_sb_available();
}

/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interrupt support on the AP queue via ap_aqic() and maps
 * the response code of the returned AP queue status to an errno:
 * 0 on success, -EPERM, -EOPNOTSUPP or -EBUSY on failure.
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	union ap_qirq_ctrl qirqctrl = { .value = 0 };
	struct ap_queue_status status;

	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}
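
/*
 * Illustrative sketch (not part of the driver): a caller that wants
 * interrupt support on a queue fetches the adapter interrupt summary
 * indicator first and retries later if ap_queue_enable_irq() returns
 * -EBUSY. This mirrors the pattern used by ap_sm_read() below:
 *
 *	void *lsi_ptr = ap_airq_ptr();
 *
 *	if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
 *		aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
 */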

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @msglen: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
	  int special)
{
	struct ap_queue_status status;

	if (special)
		qid |= 0x400000UL;

	status = ap_nqap(qid, psmid, msg, msglen);

	trace_s390_ap_nqap(AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			   status.value, psmid);

	return status;
}
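
/*
 * Note: or'ing 0x400000UL into the qid sets the special-order bit in
 * the register value passed to NQAP. A caller requests this per
 * message (illustrative sketch, before handing the message to
 * ap_queue_message()):
 *
 *	ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
 */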

/* State machine definitions and helpers */

static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the AP queue status fetched while receiving the reply.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * DQAP loop until response code and resgr0 indicate that
	 * the msg has been received completely. As the very same
	 * buffer is used, each invocation overwrites the msg. That
	 * is intentional; in this case the receiver of the msg is
	 * informed with a msg rc code of -EMSGSIZE.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	trace_s390_ap_dqap(AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
			   status.value, aq->reply->psmid);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     aq->reply->msg, aq->reply->len, false);
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
			 aq->pendingq_count, aq->requestq_count);
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or
 * AP_SM_WAIT_HIGH_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return status.irq_enabled ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* Check and maybe enable irq support (again) on this queue */
	if (!status.irq_enabled && status.queue_empty) {
		void *lsi_ptr = ap_airq_ptr();

		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
			return AP_SM_WAIT_AGAIN;
		}
	}
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT,
 * AP_SM_WAIT_HIGH_TIMEOUT or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg->msg, ap_msg->len, false);
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->hwinfo.qd) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return status.irq_enabled ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns the more urgent (numerically smaller) of the wait hints
 * returned by ap_sm_read() and ap_sm_write().
 */
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
	return min(ap_sm_read(aq), ap_sm_write(aq));
}
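
/*
 * Note: this relies on enum ap_sm_wait being ordered from most urgent
 * (AP_SM_WAIT_AGAIN) to least urgent (AP_SM_WAIT_NONE), so min() picks
 * the more urgent of the read and write wait hints.
 */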

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command to an AP queue.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid, aq->rapq_fbit);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->rapq_fbit = 0;
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	void *lsi_ptr;

	/* Get the status with TAPQ */
	status = ap_test_queue(aq->qid, 1, &hwinfo);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->se_bstate = hwinfo.bs;
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_assoc_wait(): Test queue for completion of a pending
 *		       association request.
 * @aq: pointer to the AP queue
 */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* update queue's SE bind state */
	aq->se_bstate = hwinfo.bs;

	/* check bs bits */
	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		/* association is complete */
		aq->sm_state = AP_SM_STATE_IDLE;
		pr_debug("queue 0x%02x.%04x associated with %u\n",
			 AP_QID_CARD(aq->qid),
			 AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* a reset from 'outside' happened, or the state is unknown */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/*
 * AP state machine jump table
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};

enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->config && !aq->chkstop &&
	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	else
		return AP_SM_WAIT_NONE;
}

enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}
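
/*
 * Usage sketch (illustrative): callers drive the state machine with
 * the queue lock held and feed the returned wait hint into ap_wait(),
 * the same pattern the sysfs handlers and ap_queue_message() below use:
 *
 *	spin_lock_bh(&aq->lock);
 *	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
 *	spin_unlock_bh(&aq->lock);
 */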

/*
 * AP queue related attributes.
 */
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	bool valid = false;
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
		req_cnt = aq->total_request_count;
		valid = true;
	}
	spin_unlock_bh(&aq->lock);

	if (valid)
		return sysfs_emit(buf, "%llu\n", req_cnt);
	else
		return sysfs_emit(buf, "-\n");
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);
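
/*
 * The attributes of this file show up per queue device in sysfs;
 * assuming the usual AP bus device naming, e.g.:
 *
 *	cat /sys/bus/ap/devices/03.0005/request_count
 *
 * Writing anything to request_count simply resets the counter to 0.
 */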

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = sysfs_emit(buf, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = sysfs_emit(buf, "Reset Timer armed.\n");
		break;
	default:
		rc = sysfs_emit(buf, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
	} else {
		status = ap_tapq(aq->qid, NULL);
		if (status.irq_enabled)
			rc = sysfs_emit(buf, "Interrupts enabled.\n");
		else
			rc = sysfs_emit(buf, "Interrupts disabled.\n");
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

static ssize_t chkstop_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(chkstop);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
}

static DEVICE_ATTR_RO(ap_functions);

#ifdef CONFIG_AP_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return sysfs_emit(buf, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return sysfs_emit(buf, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return sysfs_emit(buf, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return sysfs_emit(buf, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return sysfs_emit(buf, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return sysfs_emit(buf, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
	default:
		return sysfs_emit(buf, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
#ifdef CONFIG_AP_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}

static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (!value) {
		/* Unbind. Set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		_ap_queue_init_state(aq);
		rc = count;
		goto out;
	}

	/* Bind. Check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* Update BS state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
		AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* Check SM state */
	if (aq->sm_state < AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* invoke BAPQ */
	status = ap_bapq(aq->qid);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->assoc_idx = ASSOC_IDX_INVALID;

	/* verify SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->se_bstate = hwinfo.bs;
	if (!(hwinfo.bs == AP_BS_Q_USABLE ||
	      hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
		AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	/* SE bind was successful */
	AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
		    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_bind);

static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}

static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* the association index must be below ASSOC_IDX_INVALID */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	/* check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
		AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* check SM state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		break;
	default:
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_associate);
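
/*
 * Typical SE guest flow for the two attributes above (illustrative
 * shell usage; the queue name 03.0005 is made up):
 *
 *	echo 1 > /sys/bus/ap/devices/03.0005/se_bind
 *	echo 42 > /sys/bus/ap/devices/03.0005/se_associate
 *
 * The bind triggers BAPQ; the associate triggers AAPQ with index 42
 * and requires the bound-but-not-associated state
 * (AP_BS_Q_USABLE_NO_SECURE_KEY). Association completes asynchronously
 * and is tracked by ap_sm_assoc_wait() above.
 */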

static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};

static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->card = ac;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = ac->ap_dev.device_type;
	/* in SE environment add bind/associate attributes group */
	if (ap_is_se_guest() && ap_q_supported_in_se(aq))
		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
	aq->qid = qid;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}
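
/*
 * Creation sketch (assumption: the AP bus scan is the caller): the bus
 * builds the qid from card and domain id and creates the queue, e.g.:
 *
 *	aq = ap_queue_create(AP_MKQID(ac->id, dom), ac);
 *
 * Device registration and hashing into ap_queues happen in the bus
 * scan code; the release callback above removes the hash entry again.
 */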

void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 *
 * Returns 0 on success or -ENODEV if the queue is not in the
 * AP_DEV_STATE_OPERATING device state.
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow to queue new messages if device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);
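
/*
 * Usage sketch (illustrative; my_receive and the buffer handling are
 * hypothetical, only the psmid/receive requirements come from this
 * file): a driver prepares an ap_message with a unique psmid and a
 * receive callback before queueing it:
 *
 *	static void my_receive(struct ap_queue *aq, struct ap_message *msg,
 *			       struct ap_message *reply)
 *	{
 *		... evaluate msg->rc and the reply data ...
 *	}
 *
 *	ap_init_message(&ap_msg);
 *	ap_msg.psmid = psmid;	(unique program supplied message id)
 *	ap_msg.msg = buf;
 *	ap_msg.len = buflen;
 *	ap_msg.receive = my_receive;
 *	rc = ap_queue_message(aq, &ap_msg);
 */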

/**
 * ap_queue_usable(): Check if queue is usable just now.
 * @aq: The AP queue device to test for usability.
 *
 * This function is intended for the scheduler to query if it makes
 * sense to enqueue a message into this AP queue device by calling
 * ap_queue_message(). The perspective is very short-term as the
 * state machine and device state(s) may change at any time.
 */
bool ap_queue_usable(struct ap_queue *aq)
{
	bool rc = true;

	spin_lock_bh(&aq->lock);

	/* check for not configured or checkstopped */
	if (!aq->config || aq->chkstop) {
		rc = false;
		goto unlock_and_out;
	}

	/* device state needs to be ok */
	if (aq->dev_state != AP_DEV_STATE_OPERATING) {
		rc = false;
		goto unlock_and_out;
	}

	/* SE guest's queues additionally need to be bound */
	if (ap_is_se_guest()) {
		if (!ap_q_supported_in_se(aq)) {
			rc = false;
			goto unlock_and_out;
		}
		if (ap_q_needs_bind(aq) &&
		    !(aq->se_bstate == AP_BS_Q_USABLE ||
		      aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
			rc = false;
	}

unlock_and_out:
	spin_unlock_bh(&aq->lock);
	return rc;
}
EXPORT_SYMBOL(ap_queue_usable);

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	timer_delete_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * All messages have been flushed and the device state is
	 * SHUTDOWN. Now reset with a zero fbit, which also clears
	 * the irq registration, and move the device state to the
	 * initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

void _ap_queue_init_state(struct ap_queue *aq)
{
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
}

void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	_ap_queue_init_state(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
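
/*
 * Queue lifecycle summary: ap_queue_create() allocates the device,
 * ap_queue_init_state() arms the state machine with a reset
 * (AP_SM_STATE_RESET_START), and ap_queue_init_reply() supplies the
 * reply buffer. On removal, ap_queue_prepare_remove() flushes all
 * messages and moves the device state to SHUTDOWN, and
 * ap_queue_remove() finally zeroes the queue with ZAPQ before the
 * device is released.
 */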