// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016, 2023
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"

static void __ap_flush_queue(struct ap_queue *aq);

/*
 * some AP queue helper functions
 */

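/* Binding is only supported on EP11 and accelerator queues. */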
static inline bool ap_q_supports_bind(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
}

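/* Association is only supported on EP11 queues. */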
static inline bool ap_q_supports_assoc(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11;
}

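/*
 * A queue needs to go through the bind procedure if it supports
 * binding and the AP secure bind facilities are available.
 */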
static inline bool ap_q_needs_bind(struct ap_queue *aq)
{
	return ap_q_supports_bind(aq) && ap_sb_available();
}

/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interrupt support on the AP queue via ap_aqic(). Based on
 * the return value the caller is expected to wait a while and then
 * check with ap_test_queue() whether interrupts have actually been
 * switched on.
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	union ap_qirq_ctrl qirqctrl = { .value = 0 };
	struct ap_queue_status status;

	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @msglen: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
	  int special)
{
	if (special)
		qid |= 0x400000UL;
	return ap_nqap(qid, psmid, msg, msglen);
}

/* State machine definitions and helpers */

static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the AP queue status structure of the last DQAP invocation.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * DQAP loop until response code and resgr0 indicate that
	 * the msg is totally received. As we use the very same buffer
	 * the msg is overwritten with each invocation. That's intended
	 * and the receiver of the msg is informed with a msg rc code
	 * of EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     aq->reply->msg, aq->reply->len, false);
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		pr_debug("%s queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
			 __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
			 aq->pendingq_count, aq->requestq_count);
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or
 * AP_SM_WAIT_HIGH_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return status.irq_enabled ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* Check and maybe enable irq support (again) on this queue */
	if (!status.irq_enabled && status.queue_empty) {
		void *lsi_ptr = ap_airq_ptr();

		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
			return AP_SM_WAIT_AGAIN;
		}
	}
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT,
 * AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_HIGH_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg->msg, ap_msg->len, false);
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->hwinfo.qd) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return status.irq_enabled ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns the smaller (i.e. the one that polls again sooner) of the
 * wait types requested by ap_sm_read() and ap_sm_write().
 */
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
	return min(ap_sm_read(aq), ap_sm_write(aq));
}

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command to an AP queue.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid, aq->rapq_fbit);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->rapq_fbit = 0;
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	void *lsi_ptr;

	/* Get the status with TAPQ */
	status = ap_test_queue(aq->qid, 1, &hwinfo);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->se_bstate = hwinfo.bs;
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_assoc_wait(): Test queue for completion of a pending
 *		       association request.
 * @aq: pointer to the AP queue
 */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* update queue's SE bind state */
	aq->se_bstate = hwinfo.bs;

	/* check bs bits */
	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		/* association is complete */
		aq->sm_state = AP_SM_STATE_IDLE;
		pr_debug("%s queue 0x%02x.%04x associated with %u\n",
			 __func__, AP_QID_CARD(aq->qid),
			 AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* a reset from 'outside' happened or the state is unknown */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/*
 * AP state machine jump table
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};

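/**
 * ap_sm_event(): Drive the AP queue state machine for one event.
 * @aq: pointer to the AP queue
 * @event: the state machine event to process
 *
 * Invokes the state/event function from the jump table if the queue
 * is configured, not checkstopped and its device state is beyond
 * AP_DEV_STATE_UNINITIATED; otherwise AP_SM_WAIT_NONE is returned.
 * The caller is assumed to hold the queue's lock.
 */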
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->config && !aq->chkstop &&
	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	else
		return AP_SM_WAIT_NONE;
}

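/**
 * ap_sm_event_loop(): Drive the AP queue state machine until it settles.
 * @aq: pointer to the AP queue
 * @event: the state machine event to process
 *
 * Re-runs ap_sm_event() as long as the state machine requests an
 * immediate re-poll (AP_SM_WAIT_AGAIN) and returns the final wait type.
 */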
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}

/*
 * AP queue related attributes.
 */
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	bool valid = false;
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
		req_cnt = aq->total_request_count;
		valid = true;
	}
	spin_unlock_bh(&aq->lock);

	if (valid)
		return sysfs_emit(buf, "%llu\n", req_cnt);
	else
		return sysfs_emit(buf, "-\n");
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = sysfs_emit(buf, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = sysfs_emit(buf, "Reset Timer armed.\n");
		break;
	default:
		rc = sysfs_emit(buf, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
	} else {
		status = ap_tapq(aq->qid, NULL);
		if (status.irq_enabled)
			rc = sysfs_emit(buf, "Interrupts enabled.\n");
		else
			rc = sysfs_emit(buf, "Interrupts disabled.\n");
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

static ssize_t chkstop_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(chkstop);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			 __func__, status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
}

static DEVICE_ATTR_RO(ap_functions);

#ifdef CONFIG_AP_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return sysfs_emit(buf, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return sysfs_emit(buf, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return sysfs_emit(buf, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return sysfs_emit(buf, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return sysfs_emit(buf, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return sysfs_emit(buf, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
	default:
		return sysfs_emit(buf, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
#ifdef CONFIG_AP_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

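/*
 * SE guest bind/associate support: the following sysfs attributes are
 * only published when the AP secure bind facilities are available on a
 * protected virtualization guest (see ap_queue_create()).
 */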
static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			 __func__, status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}

static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (!value) {
		/* Unbind. Set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		_ap_queue_init_state(aq);
		rc = count;
		goto out;
	}

	/* Bind. Check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* Update BS state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
		AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* Check SM state */
	if (aq->sm_state < AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* invoke BAPQ */
	status = ap_bapq(aq->qid);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->assoc_idx = ASSOC_IDX_INVALID;

	/* verify SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->se_bstate = hwinfo.bs;
	if (!(hwinfo.bs == AP_BS_Q_USABLE ||
	      hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
		AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	/* SE bind was successful */
	AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
		    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_bind);

static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			 __func__, status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}

static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index needs to be a value < ASSOC_IDX_INVALID */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	/* check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
		AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* check SM state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		break;
	default:
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_associate);

static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};

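/* Device release callback: unhash the queue and free its memory. */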
static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

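/**
 * ap_queue_create(): Allocate and prepare a new AP queue device.
 * @qid: The AP queue number (APQN) of the new queue device
 * @device_type: The device type of the parent AP card
 *
 * Allocates the struct ap_queue and initializes the lock, the message
 * lists and the request timeout timer. Returns NULL if the allocation
 * fails. The caller is responsible for registering the device.
 */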
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	/* add optional SE secure binding attributes group */
	if (ap_sb_available() && is_prot_virt_guest())
		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
	aq->qid = qid;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}

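/**
 * ap_queue_init_reply(): Attach a reply message buffer to an AP queue.
 * @aq: The AP queue
 * @reply: The message buffer to receive replies into
 *
 * After attaching the buffer the state machine is polled once to
 * process any pending work.
 */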
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 *
 * Returns 0 on success or -ENODEV if the queue is not in a state
 * that allows queueing new messages.
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow queueing new messages if the device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);

/**
 * ap_queue_usable(): Check if queue is usable just now.
 * @aq: The AP queue device to test for usability.
 *
 * This function is intended for the scheduler to query if it makes
 * sense to enqueue a message into this AP queue device by calling
 * ap_queue_message(). The perspective is very short-term as the
 * state machine and device state(s) may change at any time.
 */
bool ap_queue_usable(struct ap_queue *aq)
{
	bool rc = true;

	spin_lock_bh(&aq->lock);

	/* check for not configured or checkstopped */
	if (!aq->config || aq->chkstop) {
		rc = false;
		goto unlock_and_out;
	}

	/* device state needs to be ok */
	if (aq->dev_state != AP_DEV_STATE_OPERATING) {
		rc = false;
		goto unlock_and_out;
	}

	/* SE guest's queues additionally need to be bound */
	if (ap_q_needs_bind(aq) &&
	    !(aq->se_bstate == AP_BS_Q_USABLE ||
	      aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
		rc = false;

unlock_and_out:
	spin_unlock_bh(&aq->lock);
	return rc;
}
EXPORT_SYMBOL(ap_queue_usable);

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

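/* Lock-protected variant of __ap_flush_queue(). */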
void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

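/*
 * ap_queue_prepare_remove(): Flush all requests from the queue, move
 * the device state to SHUTDOWN and stop the request timeout timer.
 */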
void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * All messages have been flushed and the device state
	 * is SHUTDOWN. Now reset the queue with ZAPQ (fbit 0),
	 * which also clears the irq registration, and move the
	 * device state to the initial AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

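/*
 * _ap_queue_init_state(): (Re-)initialize the device and state machine
 * state and kick off a reset of the queue. The caller is assumed to
 * hold the queue's lock; ap_queue_init_state() is the lock-protected
 * variant.
 */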
void _ap_queue_init_state(struct ap_queue *aq)
{
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
}

void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	_ap_queue_init_state(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
1313