xref: /linux/drivers/s390/crypto/ap_queue.c (revision 920c293af8d01942caa10300ad97eabf778e8598)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright IBM Corp. 2016
4  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
5  *
6  * Adjunct processor bus, queue related code.
7  */
8 
9 #define KMSG_COMPONENT "ap"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <asm/facility.h>
15 
16 #include "ap_bus.h"
17 #include "ap_debug.h"
18 
19 static void __ap_flush_queue(struct ap_queue *aq);
20 
21 /**
22  * ap_queue_enable_interruption(): Enable interruption on an AP queue.
23  * @aq: The AP queue
24  * @ind: the notification indicator byte
25  *
26  * Enables interruption on an AP queue via ap_aqic() and translates
27  * the returned AP queue status response code into an errno value:
28  * 0 on success, -EOPNOTSUPP or -EBUSY on failure.
29  */
30 static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
31 {
32 	struct ap_queue_status status;
33 	struct ap_qirq_ctrl qirqctrl = { 0 };
34 
35 	qirqctrl.ir = 1;
36 	qirqctrl.isc = AP_ISC;
37 	status = ap_aqic(aq->qid, qirqctrl, ind);
38 	switch (status.response_code) {
39 	case AP_RESPONSE_NORMAL:
40 	case AP_RESPONSE_OTHERWISE_CHANGED:
41 		return 0;
42 	case AP_RESPONSE_Q_NOT_AVAIL:
43 	case AP_RESPONSE_DECONFIGURED:
44 	case AP_RESPONSE_CHECKSTOPPED:
45 	case AP_RESPONSE_INVALID_ADDRESS:
46 		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
47 		       AP_QID_CARD(aq->qid),
48 		       AP_QID_QUEUE(aq->qid));
49 		return -EOPNOTSUPP;
50 	case AP_RESPONSE_RESET_IN_PROGRESS:
51 	case AP_RESPONSE_BUSY:
52 	default:
53 		return -EBUSY;
54 	}
55 }
56 
57 /**
58  * __ap_send(): Send message to adjunct processor queue.
59  * @qid: The AP queue number
60  * @psmid: The program supplied message identifier
61  * @msg: The message text
62  * @length: The message length
63  * @special: Special Bit
64  *
65  * Returns AP queue status structure.
66  * Condition code 1 on NQAP can't happen because the L bit is 1.
67  * Condition code 2 on NQAP also means the send is incomplete,
68  * because a segment boundary was reached. The NQAP is repeated.
69  */
70 static inline struct ap_queue_status
71 __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
72 	  int special)
73 {
74 	if (special)
75 		qid |= 0x400000UL;
76 	return ap_nqap(qid, psmid, msg, length);
77 }
78 
79 int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
80 {
81 	struct ap_queue_status status;
82 
83 	status = __ap_send(qid, psmid, msg, length, 0);
84 	switch (status.response_code) {
85 	case AP_RESPONSE_NORMAL:
86 		return 0;
87 	case AP_RESPONSE_Q_FULL:
88 	case AP_RESPONSE_RESET_IN_PROGRESS:
89 		return -EBUSY;
90 	case AP_RESPONSE_REQ_FAC_NOT_INST:
91 		return -EINVAL;
92 	default:	/* Device is gone. */
93 		return -ENODEV;
94 	}
95 }
96 EXPORT_SYMBOL(ap_send);
97 
98 int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
99 {
100 	struct ap_queue_status status;
101 
102 	if (msg == NULL)
103 		return -EINVAL;
104 	status = ap_dqap(qid, psmid, msg, length, NULL, NULL);
105 	switch (status.response_code) {
106 	case AP_RESPONSE_NORMAL:
107 		return 0;
108 	case AP_RESPONSE_NO_PENDING_REPLY:
109 		if (status.queue_empty)
110 			return -ENOENT;
111 		return -EBUSY;
112 	case AP_RESPONSE_RESET_IN_PROGRESS:
113 		return -EBUSY;
114 	default:
115 		return -ENODEV;
116 	}
117 }
118 EXPORT_SYMBOL(ap_recv);
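
/*
 * Illustrative sketch (not part of the original file): a synchronous
 * request/reply round trip using the exported ap_send()/ap_recv()
 * helpers above. AP_MKQID() is assumed from ap_bus.h, msleep() would
 * need <linux/delay.h>, and the psmid value is arbitrary; real users
 * such as the zcrypt message type modules go through the asynchronous
 * ap_queue_message() interface further below instead.
 */
static inline int example_ap_roundtrip(int card, int domain,
				       void *req, size_t reqlen,
				       void *reply, size_t replylen)
{
	ap_qid_t qid = AP_MKQID(card, domain);
	unsigned long long psmid = 0x0102030405060708ULL;
	unsigned long long rpsmid;
	int rc;

	rc = ap_send(qid, psmid, req, reqlen);
	if (rc)
		return rc;	/* -EBUSY, -EINVAL or -ENODEV, see above */

	/* Poll for the reply; -ENOENT/-EBUSY mean it is not there yet */
	do {
		rc = ap_recv(qid, &rpsmid, reply, replylen);
		if (rc == -ENOENT || rc == -EBUSY)
			msleep(20);
	} while (rc == -ENOENT || rc == -EBUSY);

	return rc;
}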
119 
120 /* State machine definitions and helpers */
121 
122 static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
123 {
124 	return AP_SM_WAIT_NONE;
125 }
126 
127 /**
128  * ap_sm_recv(): Receive pending reply messages from an AP queue but do
129  *	not change the state of the device.
130  * @aq: pointer to the AP queue
131  *
132  * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
133  */
134 static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
135 {
136 	struct ap_queue_status status;
137 	struct ap_message *ap_msg;
138 	bool found = false;
139 	size_t reslen;
140 	unsigned long resgr0 = 0;
141 	int parts = 0;
142 
143 	/*
144 	 * Loop on DQAP until the response code and resgr0 indicate that
145 	 * the msg has been received completely. As the very same buffer
146 	 * is used for every invocation, a multi-part msg overwrites its
147 	 * earlier parts. That's intended, and the receiver of the msg is
148 	 * informed about it with a msg rc code of -EMSGSIZE.
149 	 */
150 	do {
151 		status = ap_dqap(aq->qid, &aq->reply->psmid,
152 				 aq->reply->msg, aq->reply->bufsize,
153 				 &reslen, &resgr0);
154 		parts++;
155 	} while (status.response_code == 0xFF && resgr0 != 0);
156 
157 	switch (status.response_code) {
158 	case AP_RESPONSE_NORMAL:
159 		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
160 		if (aq->queue_count > 0)
161 			mod_timer(&aq->timeout,
162 				  jiffies + aq->request_timeout);
163 		list_for_each_entry(ap_msg, &aq->pendingq, list) {
164 			if (ap_msg->psmid != aq->reply->psmid)
165 				continue;
166 			list_del_init(&ap_msg->list);
167 			aq->pendingq_count--;
168 			if (parts > 1) {
169 				ap_msg->rc = -EMSGSIZE;
170 				ap_msg->receive(aq, ap_msg, NULL);
171 			} else {
172 				ap_msg->receive(aq, ap_msg, aq->reply);
173 			}
174 			found = true;
175 			break;
176 		}
177 		if (!found) {
178 			AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
179 				    __func__, aq->reply->psmid,
180 				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
181 		}
182 		fallthrough;
183 	case AP_RESPONSE_NO_PENDING_REPLY:
184 		if (!status.queue_empty || aq->queue_count <= 0)
185 			break;
186 		/* The card shouldn't forget requests but who knows. */
187 		aq->queue_count = 0;
188 		list_splice_init(&aq->pendingq, &aq->requestq);
189 		aq->requestq_count += aq->pendingq_count;
190 		aq->pendingq_count = 0;
191 		break;
192 	default:
193 		break;
194 	}
195 	return status;
196 }
197 
198 /**
199  * ap_sm_read(): Receive pending reply messages from an AP queue.
200  * @aq: pointer to the AP queue
201  *
202  * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
203  */
204 static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
205 {
206 	struct ap_queue_status status;
207 
208 	if (!aq->reply)
209 		return AP_SM_WAIT_NONE;
210 	status = ap_sm_recv(aq);
211 	switch (status.response_code) {
212 	case AP_RESPONSE_NORMAL:
213 		if (aq->queue_count > 0) {
214 			aq->sm_state = AP_SM_STATE_WORKING;
215 			return AP_SM_WAIT_AGAIN;
216 		}
217 		aq->sm_state = AP_SM_STATE_IDLE;
218 		return AP_SM_WAIT_NONE;
219 	case AP_RESPONSE_NO_PENDING_REPLY:
220 		if (aq->queue_count > 0)
221 			return AP_SM_WAIT_INTERRUPT;
222 		aq->sm_state = AP_SM_STATE_IDLE;
223 		return AP_SM_WAIT_NONE;
224 	default:
225 		aq->dev_state = AP_DEV_STATE_ERROR;
226 		aq->last_err_rc = status.response_code;
227 		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
228 			    __func__, status.response_code,
229 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
230 		return AP_SM_WAIT_NONE;
231 	}
232 }
233 
234 /**
235  * ap_sm_write(): Send messages from the request queue to an AP queue.
236  * @aq: pointer to the AP queue
237  *
238  * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or AP_SM_WAIT_TIMEOUT
239  */
240 static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
241 {
242 	struct ap_queue_status status;
243 	struct ap_message *ap_msg;
244 	ap_qid_t qid = aq->qid;
245 
246 	if (aq->requestq_count <= 0)
247 		return AP_SM_WAIT_NONE;
248 	/* Start the next request on the queue. */
249 	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
250 #ifdef CONFIG_ZCRYPT_DEBUG
251 	if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
252 		AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
253 			    __func__, ap_msg->fi.cmd);
254 		qid = 0xFF00;
255 	}
256 #endif
257 	status = __ap_send(qid, ap_msg->psmid,
258 			   ap_msg->msg, ap_msg->len,
259 			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
260 	switch (status.response_code) {
261 	case AP_RESPONSE_NORMAL:
262 		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
263 		if (aq->queue_count == 1)
264 			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
265 		list_move_tail(&ap_msg->list, &aq->pendingq);
266 		aq->requestq_count--;
267 		aq->pendingq_count++;
268 		if (aq->queue_count < aq->card->queue_depth) {
269 			aq->sm_state = AP_SM_STATE_WORKING;
270 			return AP_SM_WAIT_AGAIN;
271 		}
272 		fallthrough;
273 	case AP_RESPONSE_Q_FULL:
274 		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
275 		return AP_SM_WAIT_INTERRUPT;
276 	case AP_RESPONSE_RESET_IN_PROGRESS:
277 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
278 		return AP_SM_WAIT_TIMEOUT;
279 	case AP_RESPONSE_INVALID_DOMAIN:
280 		AP_DBF(DBF_WARN, "AP_RESPONSE_INVALID_DOMAIN on NQAP\n");
281 		fallthrough;
282 	case AP_RESPONSE_MESSAGE_TOO_BIG:
283 	case AP_RESPONSE_REQ_FAC_NOT_INST:
284 		list_del_init(&ap_msg->list);
285 		aq->requestq_count--;
286 		ap_msg->rc = -EINVAL;
287 		ap_msg->receive(aq, ap_msg, NULL);
288 		return AP_SM_WAIT_AGAIN;
289 	default:
290 		aq->dev_state = AP_DEV_STATE_ERROR;
291 		aq->last_err_rc = status.response_code;
292 		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
293 			    __func__, status.response_code,
294 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
295 		return AP_SM_WAIT_NONE;
296 	}
297 }
298 
299 /**
300  * ap_sm_read_write(): Send and receive messages to/from an AP queue.
301  * @aq: pointer to the AP queue
302  *
303  * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or AP_SM_WAIT_TIMEOUT
304  */
305 static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
306 {
307 	return min(ap_sm_read(aq), ap_sm_write(aq));
308 }
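
/*
 * Added note (not in the original file): min() works here because
 * enum ap_sm_wait in ap_bus.h is assumed to be ordered from the most
 * urgent hint (AP_SM_WAIT_AGAIN) down to the least urgent
 * (AP_SM_WAIT_NONE), so the combined result is whichever of the read
 * and write paths needs to be serviced sooner.
 */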
309 
310 /**
311  * ap_sm_reset(): Reset an AP queue.
312  * @aq: pointer to the AP queue
313  *
314  * Submit the Reset command to an AP queue.
315  */
316 static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
317 {
318 	struct ap_queue_status status;
319 
320 	status = ap_rapq(aq->qid);
321 	switch (status.response_code) {
322 	case AP_RESPONSE_NORMAL:
323 	case AP_RESPONSE_RESET_IN_PROGRESS:
324 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
325 		aq->interrupt = AP_INTR_DISABLED;
326 		return AP_SM_WAIT_TIMEOUT;
327 	default:
328 		aq->dev_state = AP_DEV_STATE_ERROR;
329 		aq->last_err_rc = status.response_code;
330 		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
331 			    __func__, status.response_code,
332 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
333 		return AP_SM_WAIT_NONE;
334 	}
335 }
336 
337 /**
338  * ap_sm_reset_wait(): Test queue for completion of the reset operation
339  * @aq: pointer to the AP queue
340  *
341  * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or AP_SM_WAIT_NONE.
342  */
343 static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
344 {
345 	struct ap_queue_status status;
346 	void *lsi_ptr;
347 
348 	if (aq->queue_count > 0 && aq->reply)
349 		/* Try to read a completed message and get the status */
350 		status = ap_sm_recv(aq);
351 	else
352 		/* Get the status with TAPQ */
353 		status = ap_tapq(aq->qid, NULL);
354 
355 	switch (status.response_code) {
356 	case AP_RESPONSE_NORMAL:
357 		lsi_ptr = ap_airq_ptr();
358 		if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
359 			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
360 		else
361 			aq->sm_state = (aq->queue_count > 0) ?
362 				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
363 		return AP_SM_WAIT_AGAIN;
364 	case AP_RESPONSE_BUSY:
365 	case AP_RESPONSE_RESET_IN_PROGRESS:
366 		return AP_SM_WAIT_TIMEOUT;
367 	case AP_RESPONSE_Q_NOT_AVAIL:
368 	case AP_RESPONSE_DECONFIGURED:
369 	case AP_RESPONSE_CHECKSTOPPED:
370 	default:
371 		aq->dev_state = AP_DEV_STATE_ERROR;
372 		aq->last_err_rc = status.response_code;
373 		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
374 			    __func__, status.response_code,
375 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
376 		return AP_SM_WAIT_NONE;
377 	}
378 }
379 
380 /**
381  * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
382  * @aq: pointer to the AP queue
383  *
384  * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or AP_SM_WAIT_NONE.
385  */
386 static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
387 {
388 	struct ap_queue_status status;
389 
390 	if (aq->queue_count > 0 && aq->reply)
391 		/* Try to read a completed message and get the status */
392 		status = ap_sm_recv(aq);
393 	else
394 		/* Get the status with TAPQ */
395 		status = ap_tapq(aq->qid, NULL);
396 
397 	if (status.irq_enabled == 1) {
398 		/* Irqs are now enabled */
399 		aq->interrupt = AP_INTR_ENABLED;
400 		aq->sm_state = (aq->queue_count > 0) ?
401 			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
402 	}
403 
404 	switch (status.response_code) {
405 	case AP_RESPONSE_NORMAL:
406 		if (aq->queue_count > 0)
407 			return AP_SM_WAIT_AGAIN;
408 		fallthrough;
409 	case AP_RESPONSE_NO_PENDING_REPLY:
410 		return AP_SM_WAIT_TIMEOUT;
411 	default:
412 		aq->dev_state = AP_DEV_STATE_ERROR;
413 		aq->last_err_rc = status.response_code;
414 		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
415 			    __func__, status.response_code,
416 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
417 		return AP_SM_WAIT_NONE;
418 	}
419 }
420 
421 /*
422  * AP state machine jump table
423  */
424 static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
425 	[AP_SM_STATE_RESET_START] = {
426 		[AP_SM_EVENT_POLL] = ap_sm_reset,
427 		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
428 	},
429 	[AP_SM_STATE_RESET_WAIT] = {
430 		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
431 		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
432 	},
433 	[AP_SM_STATE_SETIRQ_WAIT] = {
434 		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
435 		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
436 	},
437 	[AP_SM_STATE_IDLE] = {
438 		[AP_SM_EVENT_POLL] = ap_sm_write,
439 		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
440 	},
441 	[AP_SM_STATE_WORKING] = {
442 		[AP_SM_EVENT_POLL] = ap_sm_read_write,
443 		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
444 	},
445 	[AP_SM_STATE_QUEUE_FULL] = {
446 		[AP_SM_EVENT_POLL] = ap_sm_read,
447 		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
448 	},
449 };
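
/*
 * Added note (not in the original file): the normal life cycle of a
 * queue through these states is roughly
 *
 *   RESET_START --POLL--> RESET_WAIT --POLL--> SETIRQ_WAIT --POLL-->
 *   IDLE <--> WORKING <--> QUEUE_FULL
 *
 * A TIMEOUT event in WORKING or QUEUE_FULL falls back to ap_sm_reset().
 * If interrupts cannot be enabled, ap_sm_reset_wait() skips SETIRQ_WAIT
 * and moves directly to WORKING or IDLE.
 */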
450 
451 enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
452 {
453 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
454 		return ap_jumptable[aq->sm_state][event](aq);
455 	else
456 		return AP_SM_WAIT_NONE;
457 }
458 
459 enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
460 {
461 	enum ap_sm_wait wait;
462 
463 	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
464 		;
465 	return wait;
466 }
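
/*
 * Illustrative sketch (not part of the original file): how a caller,
 * for example the AP bus polling code, might drive the state machine
 * for one queue. Taking aq->lock and calling ap_wait() on the returned
 * wait hint follows the pattern used by ap_queue_message() and
 * reset_store() below; ap_wait() itself is provided by the AP bus core.
 */
static inline void example_poll_ap_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* run POLL events until no immediate progress is possible */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}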
467 
468 /*
469  * AP queue related attributes.
470  */
471 static ssize_t request_count_show(struct device *dev,
472 				  struct device_attribute *attr,
473 				  char *buf)
474 {
475 	struct ap_queue *aq = to_ap_queue(dev);
476 	bool valid = false;
477 	u64 req_cnt;
478 
479 	spin_lock_bh(&aq->lock);
480 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
481 		req_cnt = aq->total_request_count;
482 		valid = true;
483 	}
484 	spin_unlock_bh(&aq->lock);
485 
486 	if (valid)
487 		return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
488 	else
489 		return scnprintf(buf, PAGE_SIZE, "-\n");
490 }
491 
492 static ssize_t request_count_store(struct device *dev,
493 				   struct device_attribute *attr,
494 				   const char *buf, size_t count)
495 {
496 	struct ap_queue *aq = to_ap_queue(dev);
497 
498 	spin_lock_bh(&aq->lock);
499 	aq->total_request_count = 0;
500 	spin_unlock_bh(&aq->lock);
501 
502 	return count;
503 }
504 
505 static DEVICE_ATTR_RW(request_count);
506 
507 static ssize_t requestq_count_show(struct device *dev,
508 				   struct device_attribute *attr, char *buf)
509 {
510 	struct ap_queue *aq = to_ap_queue(dev);
511 	unsigned int reqq_cnt = 0;
512 
513 	spin_lock_bh(&aq->lock);
514 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
515 		reqq_cnt = aq->requestq_count;
516 	spin_unlock_bh(&aq->lock);
517 	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
518 }
519 
520 static DEVICE_ATTR_RO(requestq_count);
521 
522 static ssize_t pendingq_count_show(struct device *dev,
523 				   struct device_attribute *attr, char *buf)
524 {
525 	struct ap_queue *aq = to_ap_queue(dev);
526 	unsigned int penq_cnt = 0;
527 
528 	spin_lock_bh(&aq->lock);
529 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
530 		penq_cnt = aq->pendingq_count;
531 	spin_unlock_bh(&aq->lock);
532 	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
533 }
534 
535 static DEVICE_ATTR_RO(pendingq_count);
536 
537 static ssize_t reset_show(struct device *dev,
538 			  struct device_attribute *attr, char *buf)
539 {
540 	struct ap_queue *aq = to_ap_queue(dev);
541 	int rc = 0;
542 
543 	spin_lock_bh(&aq->lock);
544 	switch (aq->sm_state) {
545 	case AP_SM_STATE_RESET_START:
546 	case AP_SM_STATE_RESET_WAIT:
547 		rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
548 		break;
549 	case AP_SM_STATE_WORKING:
550 	case AP_SM_STATE_QUEUE_FULL:
551 		rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
552 		break;
553 	default:
554 		rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
555 	}
556 	spin_unlock_bh(&aq->lock);
557 	return rc;
558 }
559 
560 static ssize_t reset_store(struct device *dev,
561 			   struct device_attribute *attr,
562 			   const char *buf, size_t count)
563 {
564 	struct ap_queue *aq = to_ap_queue(dev);
565 
566 	spin_lock_bh(&aq->lock);
567 	__ap_flush_queue(aq);
568 	aq->sm_state = AP_SM_STATE_RESET_START;
569 	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
570 	spin_unlock_bh(&aq->lock);
571 
572 	AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
573 	       AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
574 
575 	return count;
576 }
577 
578 static DEVICE_ATTR_RW(reset);
579 
580 static ssize_t interrupt_show(struct device *dev,
581 			      struct device_attribute *attr, char *buf)
582 {
583 	struct ap_queue *aq = to_ap_queue(dev);
584 	int rc = 0;
585 
586 	spin_lock_bh(&aq->lock);
587 	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
588 		rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
589 	else if (aq->interrupt == AP_INTR_ENABLED)
590 		rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
591 	else
592 		rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
593 	spin_unlock_bh(&aq->lock);
594 	return rc;
595 }
596 
597 static DEVICE_ATTR_RO(interrupt);
598 
599 static ssize_t config_show(struct device *dev,
600 			     struct device_attribute *attr, char *buf)
601 {
602 	struct ap_queue *aq = to_ap_queue(dev);
603 	int rc;
604 
605 	spin_lock_bh(&aq->lock);
606 	rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0);
607 	spin_unlock_bh(&aq->lock);
608 	return rc;
609 }
610 
611 static DEVICE_ATTR_RO(config);
612 
613 #ifdef CONFIG_ZCRYPT_DEBUG
614 static ssize_t states_show(struct device *dev,
615 			   struct device_attribute *attr, char *buf)
616 {
617 	struct ap_queue *aq = to_ap_queue(dev);
618 	int rc = 0;
619 
620 	spin_lock_bh(&aq->lock);
621 	/* queue device state */
622 	switch (aq->dev_state) {
623 	case AP_DEV_STATE_UNINITIATED:
624 		rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n");
625 		break;
626 	case AP_DEV_STATE_OPERATING:
627 		rc = scnprintf(buf, PAGE_SIZE, "OPERATING");
628 		break;
629 	case AP_DEV_STATE_SHUTDOWN:
630 		rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN");
631 		break;
632 	case AP_DEV_STATE_ERROR:
633 		rc = scnprintf(buf, PAGE_SIZE, "ERROR");
634 		break;
635 	default:
636 		rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN");
637 	}
638 	/* state machine state */
639 	if (aq->dev_state) {
640 		switch (aq->sm_state) {
641 		case AP_SM_STATE_RESET_START:
642 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
643 					" [RESET_START]\n");
644 			break;
645 		case AP_SM_STATE_RESET_WAIT:
646 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
647 					" [RESET_WAIT]\n");
648 			break;
649 		case AP_SM_STATE_SETIRQ_WAIT:
650 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
651 					" [SETIRQ_WAIT]\n");
652 			break;
653 		case AP_SM_STATE_IDLE:
654 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
655 					" [IDLE]\n");
656 			break;
657 		case AP_SM_STATE_WORKING:
658 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
659 					" [WORKING]\n");
660 			break;
661 		case AP_SM_STATE_QUEUE_FULL:
662 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
663 					" [FULL]\n");
664 			break;
665 		default:
666 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
667 					" [UNKNOWN]\n");
668 		}
669 	}
670 	spin_unlock_bh(&aq->lock);
671 
672 	return rc;
673 }
674 static DEVICE_ATTR_RO(states);
675 
676 static ssize_t last_err_rc_show(struct device *dev,
677 				struct device_attribute *attr, char *buf)
678 {
679 	struct ap_queue *aq = to_ap_queue(dev);
680 	int rc;
681 
682 	spin_lock_bh(&aq->lock);
683 	rc = aq->last_err_rc;
684 	spin_unlock_bh(&aq->lock);
685 
686 	switch (rc) {
687 	case AP_RESPONSE_NORMAL:
688 		return scnprintf(buf, PAGE_SIZE, "NORMAL\n");
689 	case AP_RESPONSE_Q_NOT_AVAIL:
690 		return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n");
691 	case AP_RESPONSE_RESET_IN_PROGRESS:
692 		return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n");
693 	case AP_RESPONSE_DECONFIGURED:
694 		return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n");
695 	case AP_RESPONSE_CHECKSTOPPED:
696 		return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n");
697 	case AP_RESPONSE_BUSY:
698 		return scnprintf(buf, PAGE_SIZE, "BUSY\n");
699 	case AP_RESPONSE_INVALID_ADDRESS:
700 		return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n");
701 	case AP_RESPONSE_OTHERWISE_CHANGED:
702 		return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n");
703 	case AP_RESPONSE_Q_FULL:
704 		return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n");
705 	case AP_RESPONSE_INDEX_TOO_BIG:
706 		return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n");
707 	case AP_RESPONSE_NO_FIRST_PART:
708 		return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n");
709 	case AP_RESPONSE_MESSAGE_TOO_BIG:
710 		return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n");
711 	case AP_RESPONSE_REQ_FAC_NOT_INST:
712 		return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n");
713 	default:
714 		return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc);
715 	}
716 }
717 static DEVICE_ATTR_RO(last_err_rc);
718 #endif
719 
720 static struct attribute *ap_queue_dev_attrs[] = {
721 	&dev_attr_request_count.attr,
722 	&dev_attr_requestq_count.attr,
723 	&dev_attr_pendingq_count.attr,
724 	&dev_attr_reset.attr,
725 	&dev_attr_interrupt.attr,
726 	&dev_attr_config.attr,
727 #ifdef CONFIG_ZCRYPT_DEBUG
728 	&dev_attr_states.attr,
729 	&dev_attr_last_err_rc.attr,
730 #endif
731 	NULL
732 };
733 
734 static struct attribute_group ap_queue_dev_attr_group = {
735 	.attrs = ap_queue_dev_attrs
736 };
737 
738 static const struct attribute_group *ap_queue_dev_attr_groups[] = {
739 	&ap_queue_dev_attr_group,
740 	NULL
741 };
742 
743 static struct device_type ap_queue_type = {
744 	.name = "ap_queue",
745 	.groups = ap_queue_dev_attr_groups,
746 };
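
/*
 * Added note (not in the original file): with ap_queue_type and the
 * attribute groups above, each queue device is expected to expose these
 * attributes in sysfs, typically under /sys/bus/ap/devices/<card>.<domain>/.
 * For example, reading request_count returns the accumulated request
 * counter, and any write to reset triggers a queue reset regardless of
 * the data written (see reset_store() above).
 */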
747 
748 static void ap_queue_device_release(struct device *dev)
749 {
750 	struct ap_queue *aq = to_ap_queue(dev);
751 
752 	spin_lock_bh(&ap_queues_lock);
753 	hash_del(&aq->hnode);
754 	spin_unlock_bh(&ap_queues_lock);
755 
756 	kfree(aq);
757 }
758 
759 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
760 {
761 	struct ap_queue *aq;
762 
763 	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
764 	if (!aq)
765 		return NULL;
766 	aq->ap_dev.device.release = ap_queue_device_release;
767 	aq->ap_dev.device.type = &ap_queue_type;
768 	aq->ap_dev.device_type = device_type;
769 	aq->qid = qid;
770 	aq->interrupt = AP_INTR_DISABLED;
771 	spin_lock_init(&aq->lock);
772 	INIT_LIST_HEAD(&aq->pendingq);
773 	INIT_LIST_HEAD(&aq->requestq);
774 	timer_setup(&aq->timeout, ap_request_timeout, 0);
775 
776 	return aq;
777 }
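
/*
 * Illustrative sketch (not part of the original file): roughly how the
 * AP bus scan code might register a queue created by ap_queue_create().
 * The "%02x.%04x" device name matches the notation used in the log
 * messages above; ap_bus_type and struct ap_card are assumptions taken
 * from ap_bus.h. The real bus code additionally publishes the queue in
 * the ap_queues hashtable, which ap_queue_device_release() above
 * removes it from again.
 */
static int example_register_queue(struct ap_card *ac, ap_qid_t qid)
{
	struct ap_queue *aq;

	aq = ap_queue_create(qid, ac->ap_dev.device_type);
	if (!aq)
		return -ENOMEM;
	aq->card = ac;
	aq->ap_dev.device.bus = &ap_bus_type;
	aq->ap_dev.device.parent = &ac->ap_dev.device;
	dev_set_name(&aq->ap_dev.device, "%02x.%04x",
		     AP_QID_CARD(qid), AP_QID_QUEUE(qid));

	if (device_register(&aq->ap_dev.device)) {
		/* on failure the release callback frees the queue */
		put_device(&aq->ap_dev.device);
		return -ENODEV;
	}
	return 0;
}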
778 
779 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
780 {
781 	aq->reply = reply;
782 
783 	spin_lock_bh(&aq->lock);
784 	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
785 	spin_unlock_bh(&aq->lock);
786 }
787 EXPORT_SYMBOL(ap_queue_init_reply);
788 
789 /**
790  * ap_queue_message(): Queue a request to an AP device.
791  * @aq: The AP device to queue the message to
792  * @ap_msg: The message that is to be added
793  */
794 int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
795 {
796 	int rc = 0;
797 
798 	/* msg needs to have a valid receive-callback */
799 	BUG_ON(!ap_msg->receive);
800 
801 	spin_lock_bh(&aq->lock);
802 
803 	/* only allow new messages to be queued if the device state is OK */
804 	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
805 		list_add_tail(&ap_msg->list, &aq->requestq);
806 		aq->requestq_count++;
807 		aq->total_request_count++;
808 		atomic64_inc(&aq->card->total_request_count);
809 	} else
810 		rc = -ENODEV;
811 
812 	/* Send/receive as many requests from the queue as possible. */
813 	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
814 
815 	spin_unlock_bh(&aq->lock);
816 
817 	return rc;
818 }
819 EXPORT_SYMBOL(ap_queue_message);
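
/*
 * Illustrative sketch (not part of the original file): the asynchronous
 * usage pattern ap_queue_message() expects. A completion-based receive
 * callback like this mirrors what the zcrypt message type modules do;
 * the struct and function names are made up for the example, and
 * ap_init_message() plus the private/receive members of struct
 * ap_message are assumptions taken from ap_bus.h.
 */
struct example_reply_ctx {
	struct completion work;
	int rc;
};

static void example_receive_cb(struct ap_queue *aq,
			       struct ap_message *msg,
			       struct ap_message *reply)
{
	struct example_reply_ctx *ctx = msg->private;

	/* reply is NULL whenever msg->rc is set (e.g. -EMSGSIZE, -EAGAIN);
	 * a real callback would copy the reply data out of 'reply' here */
	ctx->rc = msg->rc;
	complete(&ctx->work);
}

static inline int example_send_and_wait(struct ap_queue *aq,
					void *req, size_t len)
{
	struct example_reply_ctx ctx;
	struct ap_message ap_msg;
	int rc;

	ap_init_message(&ap_msg);
	ap_msg.msg = req;
	ap_msg.len = len;
	ap_msg.psmid = (unsigned long long)(unsigned long)&ap_msg;
	ap_msg.receive = example_receive_cb;
	ap_msg.private = &ctx;
	init_completion(&ctx.work);

	rc = ap_queue_message(aq, &ap_msg);
	if (rc)
		return rc;
	if (wait_for_completion_interruptible(&ctx.work)) {
		/* interrupted: take the request off the driver queues */
		ap_cancel_message(aq, &ap_msg);
		return -ERESTARTSYS;
	}
	return ctx.rc;
}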
820 
821 /**
822  * ap_cancel_message(): Cancel a crypto request.
823  * @aq: The AP device that has the message queued
824  * @ap_msg: The message that is to be removed
825  *
826  * Cancel a crypto request. This is done by removing the request
827  * from the device pending or request queue. Note that the
828  * request stays on the AP queue. When it finishes the message
829  * reply will be discarded because the psmid can't be found.
830  */
831 void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
832 {
833 	struct ap_message *tmp;
834 
835 	spin_lock_bh(&aq->lock);
836 	if (!list_empty(&ap_msg->list)) {
837 		list_for_each_entry(tmp, &aq->pendingq, list)
838 			if (tmp->psmid == ap_msg->psmid) {
839 				aq->pendingq_count--;
840 				goto found;
841 			}
842 		aq->requestq_count--;
843 found:
844 		list_del_init(&ap_msg->list);
845 	}
846 	spin_unlock_bh(&aq->lock);
847 }
848 EXPORT_SYMBOL(ap_cancel_message);
849 
850 /**
851  * __ap_flush_queue(): Flush requests.
852  * @aq: Pointer to the AP queue
853  *
854  * Flush all requests from the request/pending queue of an AP device.
855  */
856 static void __ap_flush_queue(struct ap_queue *aq)
857 {
858 	struct ap_message *ap_msg, *next;
859 
860 	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
861 		list_del_init(&ap_msg->list);
862 		aq->pendingq_count--;
863 		ap_msg->rc = -EAGAIN;
864 		ap_msg->receive(aq, ap_msg, NULL);
865 	}
866 	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
867 		list_del_init(&ap_msg->list);
868 		aq->requestq_count--;
869 		ap_msg->rc = -EAGAIN;
870 		ap_msg->receive(aq, ap_msg, NULL);
871 	}
872 	aq->queue_count = 0;
873 }
874 
875 void ap_flush_queue(struct ap_queue *aq)
876 {
877 	spin_lock_bh(&aq->lock);
878 	__ap_flush_queue(aq);
879 	spin_unlock_bh(&aq->lock);
880 }
881 EXPORT_SYMBOL(ap_flush_queue);
882 
883 void ap_queue_prepare_remove(struct ap_queue *aq)
884 {
885 	spin_lock_bh(&aq->lock);
886 	/* flush queue */
887 	__ap_flush_queue(aq);
888 	/* move the queue device state to SHUTDOWN to mark shutdown in progress */
889 	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
890 	spin_unlock_bh(&aq->lock);
891 	del_timer_sync(&aq->timeout);
892 }
893 
894 void ap_queue_remove(struct ap_queue *aq)
895 {
896 	/*
897 	 * All messages have been flushed and the device state
898 	 * is SHUTDOWN. Now reset the queue with zeroize (ZAPQ),
899 	 * which also clears the irq registration, and move the
900 	 * device state to the initial value AP_DEV_STATE_UNINITIATED.
901 	 */
902 	spin_lock_bh(&aq->lock);
903 	ap_zapq(aq->qid);
904 	aq->dev_state = AP_DEV_STATE_UNINITIATED;
905 	spin_unlock_bh(&aq->lock);
906 }
907 
908 void ap_queue_init_state(struct ap_queue *aq)
909 {
910 	spin_lock_bh(&aq->lock);
911 	aq->dev_state = AP_DEV_STATE_OPERATING;
912 	aq->sm_state = AP_SM_STATE_RESET_START;
913 	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
914 	spin_unlock_bh(&aq->lock);
915 }
916 EXPORT_SYMBOL(ap_queue_init_state);
917