// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"

static void __ap_flush_queue(struct ap_queue *aq);

/*
 * some AP queue helper functions
 */

static inline bool ap_q_supports_bind(struct ap_queue *aq)
{
	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11) ||
		ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL);
}

static inline bool ap_q_supports_assoc(struct ap_queue *aq)
{
	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
}
/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interrupt support for the AP queue via the AQIC instruction
 * (ap_aqic()) and maps the resulting status to an errno return value:
 * 0 on success, -EPERM on an asynchronous error, -EOPNOTSUPP if the
 * queue is not available and -EBUSY if the queue is temporarily busy.
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	union ap_qirq_ctrl qirqctrl = { .value = 0 };
	struct ap_queue_status status;

	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}
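
/*
 * Illustrative sketch, not part of the original driver: this mirrors how
 * the state machine below uses the function above - try to enable adapter
 * interrupts and fall back to polling if that fails. The helper name
 * ap_example_setup_irq() is hypothetical.
 */
static int __maybe_unused ap_example_setup_irq(struct ap_queue *aq)
{
	void *ind = ap_airq_ptr();

	/* no adapter interrupt support available -> stay in poll mode */
	if (!ind)
		return -EOPNOTSUPP;

	/* -EOPNOTSUPP or -EBUSY here also means: keep polling for now */
	return ap_queue_enable_irq(aq, ind);
}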

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @msglen: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP means the send is incomplete because a
 * segment boundary was reached; the NQAP is then simply repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
	  int special)
{
	if (special)
		qid |= 0x400000UL;
	return ap_nqap(qid, psmid, msg, msglen);
}

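/**
 * ap_send(): Send a message to an adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @msglen: The message length
 *
 * Returns 0 on success, -EBUSY if the queue is full or a reset is in
 * progress, -EINVAL if a required facility is not installed, -EPERM on
 * an asynchronous error and -ENODEV if the device is gone.
 */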
int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, msglen, 0);
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);

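/**
 * ap_recv(): Receive a message from an adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: Pointer to program supplied message identifier
 * @msg: The message buffer
 * @msglen: The message buffer length
 *
 * Returns 0 on success, -ENOENT if the queue is empty, -EBUSY if no
 * reply is pending yet or a reset is in progress, -EINVAL on a missing
 * message buffer, -EPERM on an asynchronous error and -ENODEV if the
 * device is gone.
 */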
int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen)
{
	struct ap_queue_status status;

	if (!msg)
		return -EINVAL;
	status = ap_dqap(qid, psmid, msg, msglen, NULL, NULL, NULL);
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);
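
/*
 * Illustrative sketch, not part of the original driver: a simple
 * synchronous round trip built on the exported ap_send()/ap_recv()
 * helpers above. The psmid value, the retry limit and the reuse of the
 * message buffer for the reply are hypothetical; assumes <linux/delay.h>
 * is included for udelay(). Real exploiters should use ap_queue_message()
 * and the state machine below instead of busy polling.
 */
static int __maybe_unused ap_example_roundtrip(ap_qid_t qid,
					       void *msg, size_t msglen)
{
	unsigned long psmid = 0x0102030405060708UL;	/* hypothetical id */
	unsigned long rpsmid = 0;
	int i, rc;

	rc = ap_send(qid, psmid, msg, msglen);
	if (rc)
		return rc;
	for (i = 0; i < 1000; i++) {
		rc = ap_recv(qid, &rpsmid, msg, msglen);
		if (rc != -ENOENT && rc != -EBUSY)
			break;	/* reply arrived or hard failure */
		udelay(100);	/* give the card some time */
	}
	if (rc)
		return rc;
	return rpsmid == psmid ? 0 : -EIO;
}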

/* State machine definitions and helpers */

static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the status of the AP queue as reported by the last DQAP
 * invocation (not an ap_sm_wait value).
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * Loop on DQAP until the response code and resgr0 indicate that
	 * the msg has been received completely. As the very same buffer
	 * is used for each invocation, a multi-part msg overwrites itself.
	 * That's intended, and the receiver of the msg is informed with a
	 * msg rc code of -EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or
 * AP_SM_WAIT_HIGH_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return aq->interrupt ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT,
 * AP_SM_WAIT_HIGH_TIMEOUT or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->queue_depth) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return aq->interrupt ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns the more urgent (smaller) of the wait hints produced by
 * ap_sm_read() and ap_sm_write().
 */
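/*
 * Note: the min() below relies on the declaration order of enum
 * ap_sm_wait in ap_bus.h, assuming it runs from most urgent
 * (AP_SM_WAIT_AGAIN) to least urgent (AP_SM_WAIT_NONE), so the more
 * urgent of the two wait hints wins.
 */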
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
	return min(ap_sm_read(aq), ap_sm_write(aq));
}

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command to an AP queue.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid, aq->rapq_fbit);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->interrupt = false;
		aq->rapq_fbit = 0;
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	void *lsi_ptr;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->interrupt = true;
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_assoc_wait(): Test queue for completion of a pending
 *		       association request.
 * @aq: pointer to the AP queue
 */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	status = ap_test_queue(aq->qid, 1, &info);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* check bs bits */
	switch (info.bs) {
	case AP_BS_Q_USABLE:
		/* association is complete */
		aq->sm_state = AP_SM_STATE_IDLE;
		AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
			   __func__, AP_QID_CARD(aq->qid),
			   AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* a reset from 'outside' happened, or the state is unknown */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, info.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/*
 * AP state machine jump table
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};

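/**
 * ap_sm_event(): Feed one event into the AP queue state machine.
 * @aq: pointer to the AP queue
 * @event: the event to process
 *
 * Dispatches via ap_jumptable[sm_state][event] if the queue is
 * configured, not checkstopped and initialized. The caller must hold
 * aq->lock; all call sites in this file do so.
 */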
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->config && !aq->chkstop &&
	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	else
		return AP_SM_WAIT_NONE;
}

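/**
 * ap_sm_event_loop(): Run the state machine until it stops making
 *	immediate progress.
 * @aq: pointer to the AP queue
 * @event: the event to process
 *
 * Calls ap_sm_event() repeatedly as long as AP_SM_WAIT_AGAIN is
 * returned and passes any other wait hint back to the caller.
 */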
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}

/*
 * AP queue related attributes.
 */
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	bool valid = false;
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
		req_cnt = aq->total_request_count;
		valid = true;
	}
	spin_unlock_bh(&aq->lock);

	if (valid)
		return sysfs_emit(buf, "%llu\n", req_cnt);
	else
		return sysfs_emit(buf, "-\n");
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = sysfs_emit(buf, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = sysfs_emit(buf, "Reset Timer armed.\n");
		break;
	default:
		rc = sysfs_emit(buf, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);
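
/*
 * Note: writing anything to the reset attribute flushes and resets the
 * queue; the written value itself is ignored. A hypothetical example
 * (the sysfs path layout is an assumption here):
 *   echo 1 > /sys/bus/ap/devices/<xx>.<xxxx>/reset
 */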

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
	else if (aq->interrupt)
		rc = sysfs_emit(buf, "Interrupts enabled.\n");
	else
		rc = sysfs_emit(buf, "Interrupts disabled.\n");
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

static ssize_t chkstop_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(chkstop);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	status = ap_test_queue(aq->qid, 1, &info);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return sysfs_emit(buf, "0x%08X\n", info.fac);
}

static DEVICE_ATTR_RO(ap_functions);

#ifdef CONFIG_ZCRYPT_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return sysfs_emit(buf, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return sysfs_emit(buf, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return sysfs_emit(buf, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return sysfs_emit(buf, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return sysfs_emit(buf, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return sysfs_emit(buf, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
	default:
		return sysfs_emit(buf, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &info);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	switch (info.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}

static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (value) {
		/* bind, do BAPQ */
		spin_lock_bh(&aq->lock);
		if (aq->sm_state < AP_SM_STATE_IDLE) {
			spin_unlock_bh(&aq->lock);
			return -EBUSY;
		}
		status = ap_bapq(aq->qid);
		spin_unlock_bh(&aq->lock);
		if (status.response_code) {
			AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
				    __func__, status.response_code,
				    AP_QID_CARD(aq->qid),
				    AP_QID_QUEUE(aq->qid));
			return -EIO;
		}
	} else {
		/* unbind, set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->sm_state = AP_SM_STATE_RESET_START;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
	}

	return count;
}

static DEVICE_ATTR_RW(se_bind);

static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &info);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	switch (info.bs) {
	case AP_BS_Q_USABLE:
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}

static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index needs to be a valid index < ASSOC_IDX_INVALID */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	spin_lock_bh(&aq->lock);

	/* sm should be in idle state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		spin_unlock_bh(&aq->lock);
		return -EBUSY;
	}

	/* already associated or association pending ? */
	if (aq->assoc_idx != ASSOC_IDX_INVALID) {
		spin_unlock_bh(&aq->lock);
		return -EINVAL;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
		break;
	default:
		spin_unlock_bh(&aq->lock);
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return count;
}

static DEVICE_ATTR_RW(se_associate);
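
/*
 * Note on the two SE attributes above: writing 1/0 to se_bind binds or
 * unbinds the queue (BAPQ, or RAPQ with the F bit set), and writing an
 * index below ASSOC_IDX_INVALID to se_associate triggers the
 * asynchronous association request (AAPQ). A hypothetical example
 * (the sysfs path layout is an assumption here):
 *   echo 1 > .../se_bind && echo 42 > .../se_associate
 */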

static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};

static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

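/**
 * ap_queue_create(): Allocate and prepare an AP queue device.
 * @qid: The AP queue number
 * @device_type: The device type of the owning card
 *
 * Returns a zero-initialized ap_queue with release callback, device
 * type, lock, message lists and request timer set up, or NULL if the
 * allocation fails. The caller is expected to register the embedded
 * device with the AP bus.
 */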
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	/* add optional SE secure binding attributes group */
	if (ap_sb_available() && is_prot_virt_guest())
		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
	aq->qid = qid;
	aq->interrupt = false;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}

void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 *
 * Returns 0 on success or -ENODEV if the queue is not in a state that
 * allows new messages to be queued.
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow to queue new messages if device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);
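
/*
 * Illustrative sketch, not part of the original driver: how a module
 * might queue a message and wait for the reply via the receive
 * callback. The context struct, the callback, the psmid value and the
 * use of <linux/completion.h> are hypothetical; real users (the zcrypt
 * message types) build the msg buffer according to the card's protocol
 * and copy the reply contents in their receive callback.
 */
struct ap_example_ctx {
	struct completion done;
	int rc;
};

static void ap_example_receive(struct ap_queue *aq,
			       struct ap_message *ap_msg,
			       struct ap_message *reply)
{
	struct ap_example_ctx *ctx = ap_msg->private;

	/* reply is NULL if the msg failed; ap_msg->rc then has the reason */
	ctx->rc = reply ? 0 : ap_msg->rc;
	complete(&ctx->done);
}

static int __maybe_unused ap_example_send_and_wait(struct ap_queue *aq,
						   void *buf, size_t len)
{
	struct ap_example_ctx ctx;
	struct ap_message ap_msg;
	int rc;

	ap_init_message(&ap_msg);
	ap_msg.msg = buf;
	ap_msg.len = len;
	ap_msg.psmid = 0x0102030405060708UL;	/* hypothetical unique id */
	ap_msg.receive = ap_example_receive;
	ap_msg.private = &ctx;
	init_completion(&ctx.done);

	rc = ap_queue_message(aq, &ap_msg);
	if (rc)
		return rc;
	wait_for_completion(&ctx.done);
	return ctx.rc;
}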

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * All messages have been flushed and the device state is
	 * SHUTDOWN. Now reset with zero (ZAPQ), which also clears the
	 * irq registration, and move the device state back to the
	 * initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
1219