// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016, 2023
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"

static void __ap_flush_queue(struct ap_queue *aq);

/*
 * some AP queue helper functions
 */

static inline bool ap_q_supports_bind(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
}

static inline bool ap_q_supports_assoc(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11;
}

static inline bool ap_q_needs_bind(struct ap_queue *aq)
{
	return ap_q_supports_bind(aq) && ap_sb_available();
}

/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on the AP queue via ap_aqic(). Based on the
 * return value it waits a while and then checks with ap_test_queue()
 * whether interrupts have been switched on.
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	union ap_qirq_ctrl qirqctrl = { .value = 0 };
	struct ap_queue_status status;

	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @msglen: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
	  int special)
{
	if (special)
		qid |= 0x400000UL;
	return ap_nqap(qid, psmid, msg, msglen);
}
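
/*
 * Illustrative caller-side sketch (hedged; the message setup itself is
 * assumed): drivers do not call __ap_send() directly, they tag the
 * message and let the state machine pass the flag on via ap_sm_write():
 *
 *	ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
 *	rc = ap_queue_message(aq, ap_msg);
 */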

/* State machine definitions and helpers */

static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the AP queue status structure.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * Loop with DQAP until the response code and resgr0 indicate
	 * that the msg has been received completely. As the very same
	 * buffer is used, each invocation overwrites the msg; that is
	 * intended, and the receiver of the msg is informed about it
	 * with a msg rc code of -EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}
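
/*
 * Receive-callback sketch (illustrative only; my_receive and the
 * completion bookkeeping are assumptions, not part of this driver).
 * Note that a multi-part reply arrives with rc == -EMSGSIZE and a
 * NULL reply pointer, and a flushed message with rc == -EAGAIN:
 *
 *	static void my_receive(struct ap_queue *aq,
 *			       struct ap_message *msg,
 *			       struct ap_message *reply)
 *	{
 *		if (msg->rc || !reply)
 *			return;	// e.g. -EMSGSIZE: reply did not fit
 *		// copy reply->msg / reply->len to the requester here
 *	}
 */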

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or
 * AP_SM_WAIT_HIGH_TIMEOUT
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return status.irq_enabled ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* Check and maybe enable irq support (again) on this queue */
	if (!status.irq_enabled && status.queue_empty) {
		void *lsi_ptr = ap_airq_ptr();

		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
			return AP_SM_WAIT_AGAIN;
		}
	}
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT,
 * AP_SM_WAIT_HIGH_TIMEOUT or AP_SM_WAIT_LOW_TIMEOUT
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->hwinfo.qd) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return status.irq_enabled ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns the minimum of the wait hints returned by ap_sm_read()
 * and ap_sm_write()
 */
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
	return min(ap_sm_read(aq), ap_sm_write(aq));
}

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command to an AP queue.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid, aq->rapq_fbit);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->rapq_fbit = 0;
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	void *lsi_ptr;

	/* Get the status with TAPQ */
	status = ap_test_queue(aq->qid, 1, &hwinfo);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->se_bstate = hwinfo.bs;
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_assoc_wait(): Test queue for completion of a pending
 *		       association request.
 * @aq: pointer to the AP queue
 */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* update queue's SE bind state */
	aq->se_bstate = hwinfo.bs;

	/* check bs bits */
	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		/* association is complete */
		aq->sm_state = AP_SM_STATE_IDLE;
		AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
			   __func__, AP_QID_CARD(aq->qid),
			   AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* reset from 'outside' happened or no idea at all */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/*
 * AP state machine jump table
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};

enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->config && !aq->chkstop &&
	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	else
		return AP_SM_WAIT_NONE;
}

enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}
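
/*
 * Usage sketch: both entry points expect aq->lock to be held and their
 * wait hint to be handed to ap_wait(), as the callers in this file do:
 *
 *	spin_lock_bh(&aq->lock);
 *	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
 *	spin_unlock_bh(&aq->lock);
 */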

/*
 * AP queue related attributes.
 */
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	bool valid = false;
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
		req_cnt = aq->total_request_count;
		valid = true;
	}
	spin_unlock_bh(&aq->lock);

	if (valid)
		return sysfs_emit(buf, "%llu\n", req_cnt);
	else
		return sysfs_emit(buf, "-\n");
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = sysfs_emit(buf, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = sysfs_emit(buf, "Reset Timer armed.\n");
		break;
	default:
		rc = sysfs_emit(buf, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);
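
/*
 * Userspace sketch (sysfs path assumed, e.g. AP queue 01.0005):
 *
 *	echo 1 > /sys/bus/ap/devices/01.0005/reset
 *	cat /sys/bus/ap/devices/01.0005/reset
 *
 * Any write triggers the reset; the written value is not interpreted.
 */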

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
	} else {
		status = ap_tapq(aq->qid, NULL);
		if (status.irq_enabled)
			rc = sysfs_emit(buf, "Interrupts enabled.\n");
		else
			rc = sysfs_emit(buf, "Interrupts disabled.\n");
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

static ssize_t chkstop_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(chkstop);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
}

static DEVICE_ATTR_RO(ap_functions);

#ifdef CONFIG_ZCRYPT_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return sysfs_emit(buf, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return sysfs_emit(buf, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return sysfs_emit(buf, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return sysfs_emit(buf, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return sysfs_emit(buf, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return sysfs_emit(buf, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
	default:
		return sysfs_emit(buf, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}

static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (!value) {
		/* Unbind. Set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		_ap_queue_init_state(aq);
		rc = count;
		goto out;
	}

	/* Bind. Check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* Update BS state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
		AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* Check SM state */
	if (aq->sm_state < AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* invoke BAPQ */
	status = ap_bapq(aq->qid);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->assoc_idx = ASSOC_IDX_INVALID;

	/* verify SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->se_bstate = hwinfo.bs;
	if (!(hwinfo.bs == AP_BS_Q_USABLE ||
	      hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
		AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	/* SE bind was successful */
	AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
		    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_bind);

static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}

static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index needs to be >= 0 */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	/* check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
		AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* check SM state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		break;
	default:
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_associate);
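
/*
 * Userspace sketch for the SE attributes (sysfs paths assumed as for
 * reset above): se_bind takes a boolean (kstrtobool), se_associate an
 * association index below ASSOC_IDX_INVALID:
 *
 *	echo 1 > /sys/bus/ap/devices/01.0005/se_bind
 *	echo 5 > /sys/bus/ap/devices/01.0005/se_associate
 *	cat /sys/bus/ap/devices/01.0005/se_associate
 */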

static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};

static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	/* add optional SE secure binding attributes group */
	if (ap_sb_available() && is_prot_virt_guest())
		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
	aq->qid = qid;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}
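
/*
 * Lifecycle sketch (illustrative; the AP bus scan code is the real
 * caller): a queue device is created, given a reply message buffer
 * and then moved into its initial state, which triggers the reset:
 *
 *	aq = ap_queue_create(qid, device_type);
 *	if (!aq)
 *		return NULL;
 *	ap_queue_init_reply(aq, reply);	// reply buffer assumed set up
 *	ap_queue_init_state(aq);
 */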

void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow queueing new messages if device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);
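
/*
 * Caller sketch (illustrative; my_receive and the shown field setup
 * are assumptions): a message needs a receive callback and a unique
 * psmid before it is queued, and -ENODEV must be handled:
 *
 *	ap_msg->psmid = ...;		// program supplied msg id
 *	ap_msg->receive = my_receive;	// mandatory, see BUG_ON above
 *	rc = ap_queue_message(aq, ap_msg);
 *	if (rc == -ENODEV)
 *		...			// queue not in OPERATING state
 */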

/**
 * ap_queue_usable(): Check if queue is usable just now.
 * @aq: The AP queue device to test for usability.
 *
 * This function is intended for the scheduler to query if it makes
 * sense to enqueue a message into this AP queue device by calling
 * ap_queue_message(). The perspective is very short-term as the
 * state machine and device state(s) may change at any time.
 */
bool ap_queue_usable(struct ap_queue *aq)
{
	bool rc = true;

	spin_lock_bh(&aq->lock);

	/* check for not configured or checkstopped */
	if (!aq->config || aq->chkstop) {
		rc = false;
		goto unlock_and_out;
	}

	/* device state needs to be ok */
	if (aq->dev_state != AP_DEV_STATE_OPERATING) {
		rc = false;
		goto unlock_and_out;
	}

	/* SE guest's queues additionally need to be bound */
	if (ap_q_needs_bind(aq) &&
	    !(aq->se_bstate == AP_BS_Q_USABLE ||
	      aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
		rc = false;

unlock_and_out:
	spin_unlock_bh(&aq->lock);
	return rc;
}
EXPORT_SYMBOL(ap_queue_usable);
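
/*
 * Scheduler sketch: the answer is only a snapshot, so a positive check
 * does not guarantee that the following enqueue succeeds:
 *
 *	if (ap_queue_usable(aq))
 *		rc = ap_queue_message(aq, ap_msg);	// may still fail
 */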

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);
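
/*
 * Cancel sketch (illustrative; the 'done' completion is an assumption):
 * a requester that gives up waiting removes its message; a late reply
 * from the hardware is then dropped as its psmid matches nothing:
 *
 *	rc = ap_queue_message(aq, ap_msg);
 *	if (!rc && wait_for_completion_interruptible(&done))
 *		ap_cancel_message(aq, ap_msg);
 */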

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * All messages have been flushed and the device state is
	 * SHUTDOWN. Now reset with zero which also clears the irq
	 * registration and moves the device state to the initial
	 * value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

void _ap_queue_init_state(struct ap_queue *aq)
{
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
}

void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	_ap_queue_init_state(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);