// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016, 2023
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"

static void __ap_flush_queue(struct ap_queue *aq);

/*
 * some AP queue helper functions
 */

static inline bool ap_q_supports_bind(struct ap_queue *aq)
{
	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11) ||
		ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL);
}

static inline bool ap_q_supports_assoc(struct ap_queue *aq)
{
	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
}

static inline bool ap_q_needs_bind(struct ap_queue *aq)
{
	return ap_q_supports_bind(aq) && ap_sb_available();
}

/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interrupts on the AP queue via ap_aqic(). Based on the
 * return value the caller is expected to wait a while and then check
 * with ap_test_queue() whether interrupts have actually been switched on.
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	union ap_qirq_ctrl qirqctrl = { .value = 0 };
	struct ap_queue_status status;

	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @msglen: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
	  int special)
{
	if (special)
		qid |= 0x400000UL;
	return ap_nqap(qid, psmid, msg, msglen);
}

/* State machine definitions and helpers */

static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the AP queue status as returned by the last DQAP invocation.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * DQAP loop until response code and resgr0 indicate that
	 * the msg is totally received. As we use the very same buffer
	 * the msg is overwritten with each invocation. That's intended
	 * and the receiver of the msg is informed with a msg rc code
	 * of EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or
 * AP_SM_WAIT_HIGH_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return aq->interrupt ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT,
 * AP_SM_WAIT_HIGH_TIMEOUT or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->queue_depth) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return aq->interrupt ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns the smaller (i.e. more urgent) of the two wait hints
 * produced by ap_sm_read() and ap_sm_write().
 */
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
	return min(ap_sm_read(aq), ap_sm_write(aq));
}

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command to an AP queue.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid, aq->rapq_fbit);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->interrupt = false;
		aq->rapq_fbit = 0;
		aq->se_bound = false;
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	void *lsi_ptr;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->interrupt = true;
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_assoc_wait(): Test queue for completion of a pending
 *		       association request.
 * @aq: pointer to the AP queue
 */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	status = ap_test_queue(aq->qid, 1, &info);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* check bs bits */
	switch (info.bs) {
	case AP_BS_Q_USABLE:
		/* association is complete */
		aq->sm_state = AP_SM_STATE_IDLE;
		AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
			   __func__, AP_QID_CARD(aq->qid),
			   AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* reset from 'outside' happened or no idea at all */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, info.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/*
 * AP state machine jump table
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};
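
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * Dispatch through the table above is a plain two-dimensional array
 * lookup: the current state machine state selects the row, the event
 * selects the column, and the stored ap_func_t pointer is invoked with
 * the queue. For example, in AP_SM_STATE_WORKING a poll resolves to
 * ap_sm_read_write() while a timeout escalates to ap_sm_reset().
 * my_timeout_example() is a hypothetical name; ap_sm_event() below
 * performs this lookup for real.
 */
#if 0
static enum ap_sm_wait my_timeout_example(struct ap_queue *aq)
{
	/* in WORKING state a timeout event escalates to a queue reset */
	return ap_jumptable[AP_SM_STATE_WORKING][AP_SM_EVENT_TIMEOUT](aq);
}
#endif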

enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->config && !aq->chkstop &&
	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	else
		return AP_SM_WAIT_NONE;
}

enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}
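
/*
 * Editor's note: a hedged usage sketch, not part of the original file.
 * Callers in this file run the state machine under the queue lock and
 * feed the returned wait hint straight into ap_wait() (declared in
 * ap_bus.h), which arms a timer or waits for an interrupt as requested.
 * my_poll_queue() is a hypothetical name; the body mirrors the pattern
 * used by ap_queue_message() and the sysfs store handlers below.
 */
#if 0
static void my_poll_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* run the state machine until it stops asking for AP_SM_WAIT_AGAIN */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
#endif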

/*
 * AP queue related attributes.
 */
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	bool valid = false;
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
		req_cnt = aq->total_request_count;
		valid = true;
	}
	spin_unlock_bh(&aq->lock);

	if (valid)
		return sysfs_emit(buf, "%llu\n", req_cnt);
	else
		return sysfs_emit(buf, "-\n");
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = sysfs_emit(buf, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = sysfs_emit(buf, "Reset Timer armed.\n");
		break;
	default:
		rc = sysfs_emit(buf, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
	else if (aq->interrupt)
		rc = sysfs_emit(buf, "Interrupts enabled.\n");
	else
		rc = sysfs_emit(buf, "Interrupts disabled.\n");
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

static ssize_t chkstop_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(chkstop);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	status = ap_test_queue(aq->qid, 1, &info);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return sysfs_emit(buf, "0x%08X\n", info.fac);
}

static DEVICE_ATTR_RO(ap_functions);

#ifdef CONFIG_ZCRYPT_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return sysfs_emit(buf, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return sysfs_emit(buf, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return sysfs_emit(buf, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return sysfs_emit(buf, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return sysfs_emit(buf, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return sysfs_emit(buf, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
	default:
		return sysfs_emit(buf, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &info);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	switch (info.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}

static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (value) {
		/* bind, do BAPQ */
		spin_lock_bh(&aq->lock);
		if (aq->sm_state < AP_SM_STATE_IDLE) {
			spin_unlock_bh(&aq->lock);
			return -EBUSY;
		}
		status = ap_bapq(aq->qid);
		spin_unlock_bh(&aq->lock);
		if (!status.response_code) {
			aq->se_bound = true;
			AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
				    AP_QID_CARD(aq->qid),
				    AP_QID_QUEUE(aq->qid));
		} else {
			AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
				    __func__, status.response_code,
				    AP_QID_CARD(aq->qid),
				    AP_QID_QUEUE(aq->qid));
			return -EIO;
		}
	} else {
		/* unbind, set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->sm_state = AP_SM_STATE_RESET_START;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
	}

	return count;
}

static DEVICE_ATTR_RW(se_bind);

static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &info);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	switch (info.bs) {
	case AP_BS_Q_USABLE:
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}

static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index needs to be >= 0 */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	spin_lock_bh(&aq->lock);

	/* sm should be in idle state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		spin_unlock_bh(&aq->lock);
		return -EBUSY;
	}

	/* already associated or association pending ? */
	if (aq->assoc_idx != ASSOC_IDX_INVALID) {
		spin_unlock_bh(&aq->lock);
		return -EINVAL;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
		break;
	default:
		spin_unlock_bh(&aq->lock);
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return count;
}

static DEVICE_ATTR_RW(se_associate);

static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};

static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	// add optional SE secure binding attributes group
	if (ap_sb_available() && is_prot_virt_guest())
		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
	aq->qid = qid;
	aq->interrupt = false;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}

void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);
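
/*
 * Editor's note: a hedged sketch, not part of the original file, of the
 * typical queue bring-up order as driven by a device driver's probe
 * function: create the queue device, move the state machine into
 * OPERATING/RESET_START via ap_queue_init_state(), then attach a reply
 * buffer with ap_queue_init_reply(). my_setup_queue() and its devtype
 * and reply parameters are hypothetical.
 */
#if 0
static struct ap_queue *my_setup_queue(ap_qid_t qid, int devtype,
				       struct ap_message *reply)
{
	struct ap_queue *aq = ap_queue_create(qid, devtype);

	if (!aq)
		return NULL;
	ap_queue_init_state(aq);	/* start the reset state machine */
	ap_queue_init_reply(aq, reply);	/* attach reply buffer, poll once */
	return aq;
}
#endif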

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow to queue new messages if device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);
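
/*
 * Editor's note: a hedged usage sketch, not part of the original file.
 * A caller must install a receive callback before queueing; the state
 * machine invokes it with the matching reply, or with a NULL reply and
 * ap_msg->rc set on error (see ap_sm_write() and __ap_flush_queue()).
 * The my_* names are hypothetical.
 */
#if 0
static void my_receive(struct ap_queue *aq, struct ap_message *ap_msg,
		       struct ap_message *reply)
{
	if (ap_msg->rc)
		pr_warn("request %016lx failed: %d\n",
			ap_msg->psmid, ap_msg->rc);
	/* else process reply->msg / reply->len */
}

static int my_submit(struct ap_queue *aq, struct ap_message *ap_msg)
{
	ap_msg->receive = my_receive;
	return ap_queue_message(aq, ap_msg);
}
#endif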

/**
 * ap_queue_usable(): Check if queue is usable just now.
 * @aq: The AP queue device to test for usability.
 *
 * This function is intended for the scheduler to query if it makes
 * sense to enqueue a message into this AP queue device by calling
 * ap_queue_message(). The perspective is very short-term as the
 * state machine and device state(s) may change at any time.
 */
bool ap_queue_usable(struct ap_queue *aq)
{
	bool rc = true;

	spin_lock_bh(&aq->lock);

	/* check for not configured or checkstopped */
	if (!aq->config || aq->chkstop) {
		rc = false;
		goto unlock_and_out;
	}

	/* device state needs to be ok */
	if (aq->dev_state != AP_DEV_STATE_OPERATING) {
		rc = false;
		goto unlock_and_out;
	}

	/* SE guest's queues additionally need to be bound */
	if (ap_q_needs_bind(aq) && !aq->se_bound)
		rc = false;

unlock_and_out:
	spin_unlock_bh(&aq->lock);
	return rc;
}
EXPORT_SYMBOL(ap_queue_usable);
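
/*
 * Editor's note: a hedged sketch, not part of the original file, of the
 * short-term scheduling check described in the kernel-doc above: test
 * usability immediately before enqueueing and fall back to another
 * queue on failure. my_try_enqueue() is a hypothetical name.
 */
#if 0
static int my_try_enqueue(struct ap_queue *aq, struct ap_message *ap_msg)
{
	if (!ap_queue_usable(aq))
		return -ENODEV;	/* caller should pick another queue */
	return ap_queue_message(aq, ap_msg);
}
#endif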

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * all messages have been flushed and the device state
	 * is SHUTDOWN. Now reset with zero which also clears
	 * the irq registration and move the device state
	 * to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

void _ap_queue_init_state(struct ap_queue *aq)
{
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
}

void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	_ap_queue_init_state(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
1226