xref: /linux/drivers/s390/crypto/ap_queue.c (revision c91c14618fcf4ae3cf3475e5461ea8b41bf76f6d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright IBM Corp. 2016, 2023
4  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
5  *
6  * Adjunct processor bus, queue related code.
7  */
8 
9 #define KMSG_COMPONENT "ap"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <asm/facility.h>
15 
16 #include "ap_bus.h"
17 #include "ap_debug.h"
18 
19 static void __ap_flush_queue(struct ap_queue *aq);
20 
21 /*
22  * some AP queue helper functions
23  */
24 
ap_q_supported_in_se(struct ap_queue * aq)25 static inline bool ap_q_supported_in_se(struct ap_queue *aq)
26 {
27 	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
28 }
29 
ap_q_supports_bind(struct ap_queue * aq)30 static inline bool ap_q_supports_bind(struct ap_queue *aq)
31 {
32 	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
33 }
34 
ap_q_supports_assoc(struct ap_queue * aq)35 static inline bool ap_q_supports_assoc(struct ap_queue *aq)
36 {
37 	return aq->card->hwinfo.ep11;
38 }
39 
ap_q_needs_bind(struct ap_queue * aq)40 static inline bool ap_q_needs_bind(struct ap_queue *aq)
41 {
42 	return ap_q_supports_bind(aq) && ap_sb_available();
43 }
44 
45 /**
46  * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
47  * @aq: The AP queue
48  * @ind: the notification indicator byte
49  *
50  * Enables interruption on AP queue via ap_aqic(). Based on the return
51  * value it waits a while and tests the AP queue if interrupts
52  * have been switched on using ap_test_queue().
53  */
ap_queue_enable_irq(struct ap_queue * aq,void * ind)54 static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
55 {
56 	union ap_qirq_ctrl qirqctrl = { .value = 0 };
57 	struct ap_queue_status status;
58 
59 	qirqctrl.ir = 1;
60 	qirqctrl.isc = AP_ISC;
61 	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
62 	if (status.async)
63 		return -EPERM;
64 	switch (status.response_code) {
65 	case AP_RESPONSE_NORMAL:
66 	case AP_RESPONSE_OTHERWISE_CHANGED:
67 		return 0;
68 	case AP_RESPONSE_Q_NOT_AVAIL:
69 	case AP_RESPONSE_DECONFIGURED:
70 	case AP_RESPONSE_CHECKSTOPPED:
71 	case AP_RESPONSE_INVALID_ADDRESS:
72 		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
73 		       AP_QID_CARD(aq->qid),
74 		       AP_QID_QUEUE(aq->qid));
75 		return -EOPNOTSUPP;
76 	case AP_RESPONSE_RESET_IN_PROGRESS:
77 	case AP_RESPONSE_BUSY:
78 	default:
79 		return -EBUSY;
80 	}
81 }
82 
83 /**
84  * __ap_send(): Send message to adjunct processor queue.
85  * @qid: The AP queue number
86  * @psmid: The program supplied message identifier
87  * @msg: The message text
88  * @msglen: The message length
89  * @special: Special Bit
90  *
91  * Returns AP queue status structure.
92  * Condition code 1 on NQAP can't happen because the L bit is 1.
93  * Condition code 2 on NQAP also means the send is incomplete,
94  * because a segment boundary was reached. The NQAP is repeated.
95  */
96 static inline struct ap_queue_status
__ap_send(ap_qid_t qid,unsigned long psmid,void * msg,size_t msglen,int special)97 __ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
98 	  int special)
99 {
100 	if (special)
101 		qid |= 0x400000UL;
102 	return ap_nqap(qid, psmid, msg, msglen);
103 }
104 
105 /* State machine definitions and helpers */
106 
ap_sm_nop(struct ap_queue * aq)107 static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
108 {
109 	return AP_SM_WAIT_NONE;
110 }
111 
/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns: the AP queue status of the last DQAP invocation.
 *
 * NOTE(review): pendingq and the queue counters are modified here, so
 * callers presumably hold aq->lock - confirm at the call sites.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * DQAP loop until response code and resgr0 indicate that
	 * the msg is totally received. As we use the very same buffer
	 * the msg is overwritten with each invocation. That's intended
	 * and the receiver of the msg is informed with a msg rc code
	 * of EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     aq->reply->msg, aq->reply->len, false);
		/* one reply fetched, so one msg less on the hw queue */
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		/* find the pending request this reply belongs to, by psmid */
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				/* multi-part reply overwrote itself in the
				 * buffer - signal EMSGSIZE to the owner
				 */
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
			 aq->pendingq_count, aq->requestq_count);
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}
189 
/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	/* without a reply buffer there is nothing to receive into */
	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	/* asynchronous error replies are not handled here */
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			/* more replies expected - poll again immediately */
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			/* requests in flight - wait for irq or poll later */
			return status.irq_enabled ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* Check and maybe enable irq support (again) on this queue */
	if (!status.irq_enabled && status.queue_empty) {
		void *lsi_ptr = ap_airq_ptr();

		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
			return AP_SM_WAIT_AGAIN;
		}
	}
	return AP_SM_WAIT_NONE;
}
238 
/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	/* nothing queued up for sending */
	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg->msg, ap_msg->len, false);
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	/* asynchronous error replies are not handled here */
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			/* first in-flight msg - arm the request timeout */
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->hwinfo.qd) {
			/* hw queue depth not reached yet - keep sending */
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return status.irq_enabled ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		/* msg is not sendable - drop it and report -EINVAL to owner */
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
302 
303 /**
304  * ap_sm_read_write(): Send and receive messages to/from an AP queue.
305  * @aq: pointer to the AP queue
306  *
307  * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
308  */
ap_sm_read_write(struct ap_queue * aq)309 static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
310 {
311 	return min(ap_sm_read(aq), ap_sm_write(aq));
312 }
313 
314 /**
315  * ap_sm_reset(): Reset an AP queue.
316  * @aq: The AP queue
317  *
318  * Submit the Reset command to an AP queue.
319  */
ap_sm_reset(struct ap_queue * aq)320 static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
321 {
322 	struct ap_queue_status status;
323 
324 	status = ap_rapq(aq->qid, aq->rapq_fbit);
325 	if (status.async)
326 		return AP_SM_WAIT_NONE;
327 	switch (status.response_code) {
328 	case AP_RESPONSE_NORMAL:
329 	case AP_RESPONSE_RESET_IN_PROGRESS:
330 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
331 		aq->rapq_fbit = 0;
332 		return AP_SM_WAIT_LOW_TIMEOUT;
333 	default:
334 		aq->dev_state = AP_DEV_STATE_ERROR;
335 		aq->last_err_rc = status.response_code;
336 		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
337 			    __func__, status.response_code,
338 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
339 		return AP_SM_WAIT_NONE;
340 	}
341 }
342 
/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	void *lsi_ptr;

	/* Get the status with TAPQ */
	status = ap_test_queue(aq->qid, 1, &hwinfo);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* cache the SE bind state just reported by TAPQ */
		aq->se_bstate = hwinfo.bs;
		/* reset done - try to (re)enable interrupt support */
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		/* reset not finished yet - poll again after a short delay */
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
383 
/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
423 
/**
 * ap_sm_assoc_wait(): Test queue for completion of a pending
 *		       association request.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* anything above BUSY is treated as a hard failure */
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* update queue's SE bind state */
	aq->se_bstate = hwinfo.bs;

	/* check bs bits */
	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		/* association is through */
		aq->sm_state = AP_SM_STATE_IDLE;
		pr_debug("queue 0x%02x.%04x associated with %u\n",
			 AP_QID_CARD(aq->qid),
			 AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* reset from 'outside' happened or no idea at all */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
479 
/*
 * AP state machine jump table
 *
 * Indexed by [state][event]; each entry is the action to run when the
 * given event hits a queue in the given state. States with no useful
 * timeout handling map the timeout event to ap_sm_nop().
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};
513 
ap_sm_event(struct ap_queue * aq,enum ap_sm_event event)514 enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
515 {
516 	if (aq->config && !aq->chkstop &&
517 	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
518 		return ap_jumptable[aq->sm_state][event](aq);
519 	else
520 		return AP_SM_WAIT_NONE;
521 }
522 
ap_sm_event_loop(struct ap_queue * aq,enum ap_sm_event event)523 enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
524 {
525 	enum ap_sm_wait wait;
526 
527 	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
528 		;
529 	return wait;
530 }
531 
532 /*
533  * AP queue related attributes.
534  */
request_count_show(struct device * dev,struct device_attribute * attr,char * buf)535 static ssize_t request_count_show(struct device *dev,
536 				  struct device_attribute *attr,
537 				  char *buf)
538 {
539 	struct ap_queue *aq = to_ap_queue(dev);
540 	bool valid = false;
541 	u64 req_cnt;
542 
543 	spin_lock_bh(&aq->lock);
544 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
545 		req_cnt = aq->total_request_count;
546 		valid = true;
547 	}
548 	spin_unlock_bh(&aq->lock);
549 
550 	if (valid)
551 		return sysfs_emit(buf, "%llu\n", req_cnt);
552 	else
553 		return sysfs_emit(buf, "-\n");
554 }
555 
/* Writing anything to the request_count attribute resets the counter. */
static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);
570 
requestq_count_show(struct device * dev,struct device_attribute * attr,char * buf)571 static ssize_t requestq_count_show(struct device *dev,
572 				   struct device_attribute *attr, char *buf)
573 {
574 	struct ap_queue *aq = to_ap_queue(dev);
575 	unsigned int reqq_cnt = 0;
576 
577 	spin_lock_bh(&aq->lock);
578 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
579 		reqq_cnt = aq->requestq_count;
580 	spin_unlock_bh(&aq->lock);
581 	return sysfs_emit(buf, "%d\n", reqq_cnt);
582 }
583 
584 static DEVICE_ATTR_RO(requestq_count);
585 
pendingq_count_show(struct device * dev,struct device_attribute * attr,char * buf)586 static ssize_t pendingq_count_show(struct device *dev,
587 				   struct device_attribute *attr, char *buf)
588 {
589 	struct ap_queue *aq = to_ap_queue(dev);
590 	unsigned int penq_cnt = 0;
591 
592 	spin_lock_bh(&aq->lock);
593 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
594 		penq_cnt = aq->pendingq_count;
595 	spin_unlock_bh(&aq->lock);
596 	return sysfs_emit(buf, "%d\n", penq_cnt);
597 }
598 
599 static DEVICE_ATTR_RO(pendingq_count);
600 
reset_show(struct device * dev,struct device_attribute * attr,char * buf)601 static ssize_t reset_show(struct device *dev,
602 			  struct device_attribute *attr, char *buf)
603 {
604 	struct ap_queue *aq = to_ap_queue(dev);
605 	int rc = 0;
606 
607 	spin_lock_bh(&aq->lock);
608 	switch (aq->sm_state) {
609 	case AP_SM_STATE_RESET_START:
610 	case AP_SM_STATE_RESET_WAIT:
611 		rc = sysfs_emit(buf, "Reset in progress.\n");
612 		break;
613 	case AP_SM_STATE_WORKING:
614 	case AP_SM_STATE_QUEUE_FULL:
615 		rc = sysfs_emit(buf, "Reset Timer armed.\n");
616 		break;
617 	default:
618 		rc = sysfs_emit(buf, "No Reset Timer set.\n");
619 	}
620 	spin_unlock_bh(&aq->lock);
621 	return rc;
622 }
623 
/*
 * Writing to the reset attribute flushes all queued requests and
 * triggers a fresh reset cycle through the queue's state machine.
 */
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);
643 
interrupt_show(struct device * dev,struct device_attribute * attr,char * buf)644 static ssize_t interrupt_show(struct device *dev,
645 			      struct device_attribute *attr, char *buf)
646 {
647 	struct ap_queue *aq = to_ap_queue(dev);
648 	struct ap_queue_status status;
649 	int rc = 0;
650 
651 	spin_lock_bh(&aq->lock);
652 	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
653 		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
654 	} else {
655 		status = ap_tapq(aq->qid, NULL);
656 		if (status.irq_enabled)
657 			rc = sysfs_emit(buf, "Interrupts enabled.\n");
658 		else
659 			rc = sysfs_emit(buf, "Interrupts disabled.\n");
660 	}
661 	spin_unlock_bh(&aq->lock);
662 
663 	return rc;
664 }
665 
666 static DEVICE_ATTR_RO(interrupt);
667 
config_show(struct device * dev,struct device_attribute * attr,char * buf)668 static ssize_t config_show(struct device *dev,
669 			   struct device_attribute *attr, char *buf)
670 {
671 	struct ap_queue *aq = to_ap_queue(dev);
672 	int rc;
673 
674 	spin_lock_bh(&aq->lock);
675 	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
676 	spin_unlock_bh(&aq->lock);
677 	return rc;
678 }
679 
680 static DEVICE_ATTR_RO(config);
681 
chkstop_show(struct device * dev,struct device_attribute * attr,char * buf)682 static ssize_t chkstop_show(struct device *dev,
683 			    struct device_attribute *attr, char *buf)
684 {
685 	struct ap_queue *aq = to_ap_queue(dev);
686 	int rc;
687 
688 	spin_lock_bh(&aq->lock);
689 	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
690 	spin_unlock_bh(&aq->lock);
691 	return rc;
692 }
693 
694 static DEVICE_ATTR_RO(chkstop);
695 
/*
 * Show the hwinfo facility bits of this queue as reported by a
 * fresh TAPQ invocation.
 */
static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		/* queue not usable - report an I/O error to userspace */
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
}

static DEVICE_ATTR_RO(ap_functions);
715 
716 #ifdef CONFIG_AP_DEBUG
/*
 * Show (CONFIG_AP_DEBUG only) the device state and - if initiated -
 * the state machine state of this queue. Note the newline placement:
 * only the UNINITIATED text terminates the line itself; for all other
 * device states the newline comes with the appended "[...]" string.
 */
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);
774 
last_err_rc_show(struct device * dev,struct device_attribute * attr,char * buf)775 static ssize_t last_err_rc_show(struct device *dev,
776 				struct device_attribute *attr, char *buf)
777 {
778 	struct ap_queue *aq = to_ap_queue(dev);
779 	int rc;
780 
781 	spin_lock_bh(&aq->lock);
782 	rc = aq->last_err_rc;
783 	spin_unlock_bh(&aq->lock);
784 
785 	switch (rc) {
786 	case AP_RESPONSE_NORMAL:
787 		return sysfs_emit(buf, "NORMAL\n");
788 	case AP_RESPONSE_Q_NOT_AVAIL:
789 		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
790 	case AP_RESPONSE_RESET_IN_PROGRESS:
791 		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
792 	case AP_RESPONSE_DECONFIGURED:
793 		return sysfs_emit(buf, "DECONFIGURED\n");
794 	case AP_RESPONSE_CHECKSTOPPED:
795 		return sysfs_emit(buf, "CHECKSTOPPED\n");
796 	case AP_RESPONSE_BUSY:
797 		return sysfs_emit(buf, "BUSY\n");
798 	case AP_RESPONSE_INVALID_ADDRESS:
799 		return sysfs_emit(buf, "INVALID_ADDRESS\n");
800 	case AP_RESPONSE_OTHERWISE_CHANGED:
801 		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
802 	case AP_RESPONSE_Q_FULL:
803 		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
804 	case AP_RESPONSE_INDEX_TOO_BIG:
805 		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
806 	case AP_RESPONSE_NO_FIRST_PART:
807 		return sysfs_emit(buf, "NO_FIRST_PART\n");
808 	case AP_RESPONSE_MESSAGE_TOO_BIG:
809 		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
810 	case AP_RESPONSE_REQ_FAC_NOT_INST:
811 		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
812 	default:
813 		return sysfs_emit(buf, "response code %d\n", rc);
814 	}
815 }
816 static DEVICE_ATTR_RO(last_err_rc);
817 #endif
818 
/* sysfs attributes attached to each AP queue device */
static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
#ifdef CONFIG_AP_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};
834 
/* attribute group combining all AP queue sysfs attributes */
static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};
838 
/* NULL terminated list of attribute groups for AP queue devices */
static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};
843 
/* device type for AP queue devices, hooks up the sysfs attributes */
static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};
848 
/*
 * Show the SE bind state of this queue: "bound", "unbound", or "-"
 * if the card type does not support binding at all. As a side effect
 * the cached se_bstate is refreshed from a fresh TAPQ invocation.
 */
static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		/* queue not usable - report an I/O error to userspace */
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}
880 
/*
 * Bind (write "1") or unbind (write "0") this queue in an SE
 * environment. Unbind flushes the queue, arms the RAPQ F bit and
 * re-initializes the queue state (via _ap_queue_init_state(), defined
 * elsewhere - presumably restarting the reset cycle; confirm there).
 * Bind verifies the current bind state via TAPQ, invokes BAPQ and
 * re-checks the resulting bind state.
 */
static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (!value) {
		/* Unbind. Set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		_ap_queue_init_state(aq);
		rc = count;
		goto out;
	}

	/* Bind. Check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		/* lock not yet taken here, plain return is fine */
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* Update BS state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
		AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* Check SM state */
	if (aq->sm_state < AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* invoke BAPQ */
	status = ap_bapq(aq->qid);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->assoc_idx = ASSOC_IDX_INVALID;

	/* verify SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->se_bstate = hwinfo.bs;
	if (!(hwinfo.bs == AP_BS_Q_USABLE ||
	      hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
		AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	/* SE bind was successful */
	AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
		    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_bind);
976 
/*
 * Show the SE association state of this queue: "associated <idx>",
 * "association pending", "unassociated", or "-" if the card type
 * does not support association. As a side effect the cached
 * se_bstate is refreshed from a fresh TAPQ invocation.
 */
static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	/* NOTE(review): assoc_idx is read below without holding aq->lock -
	 * confirm this race with a concurrent association is benign.
	 */
	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}
1015 
/*
 * se_associate_store(): Trigger an SE association for this queue.
 *
 * Parses an association index from @buf, verifies via TAPQ that the
 * queue is bound but not yet associated, then issues the asynchronous
 * AAPQ instruction and lets the state machine poll for completion.
 * Returns @count on success or a negative errno.
 */
static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	unsigned int value;
	int rc;

	/* association is only supported on EP11 queues (ap_q_supports_assoc) */
	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index needs to be >= 0 */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	/* check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	/* the queue must be bound but not yet associated */
	if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
		AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* check SM state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		/* request accepted: record idx and poll via the state machine */
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		break;
	default:
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}
1083 
static DEVICE_ATTR_RW(se_associate);

/* sysfs attributes only shown for queues usable in an SE environment */
static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

/* attached to the queue device in ap_queue_create() for SE guests */
static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};
1100 
/*
 * Device release callback, invoked when the last reference to the
 * queue device is dropped. Removes the queue from the ap_queues
 * hash (under ap_queues_lock) and frees the queue object.
 */
static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}
1111 
/*
 * ap_queue_create(): Allocate and set up a new AP queue device object.
 * @qid: The qid of the new queue.
 * @ac: The card the new queue belongs to.
 *
 * Returns the new queue object or NULL if the allocation failed.
 */
struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac)
{
	struct ap_queue *aq = kzalloc(sizeof(*aq), GFP_KERNEL);

	if (!aq)
		return NULL;

	aq->qid = qid;
	aq->card = ac;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = ac->ap_dev.device_type;
	/* in SE environment add bind/associate attributes group */
	if (ap_is_se_guest() && ap_q_supported_in_se(aq))
		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;

	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}
1134 
/*
 * ap_queue_init_reply(): Attach a reply message buffer to this queue
 * and kick the state machine with a POLL event.
 * @aq: The AP queue
 * @reply: The message buffer used for receiving replies
 *
 * NOTE(review): aq->reply is assigned before aq->lock is taken —
 * presumably callers invoke this before any traffic is possible on
 * the queue; verify against callers.
 */
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);
1144 
1145 /**
1146  * ap_queue_message(): Queue a request to an AP device.
1147  * @aq: The AP device to queue the message to
1148  * @ap_msg: The message that is to be added
1149  */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	/* 0 on success, -ENODEV if the device is not in OPERATING state */
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow to queue new messages if device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many request from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);
1177 
1178 /**
1179  * ap_queue_usable(): Check if queue is usable just now.
1180  * @aq: The AP queue device to test for usability.
1181  * This function is intended for the scheduler to query if it makes
1182  * sense to enqueue a message into this AP queue device by calling
1183  * ap_queue_message(). The perspective is very short-term as the
1184  * state machine and device state(s) may change at any time.
1185  */
ap_queue_usable(struct ap_queue * aq)1186 bool ap_queue_usable(struct ap_queue *aq)
1187 {
1188 	bool rc = true;
1189 
1190 	spin_lock_bh(&aq->lock);
1191 
1192 	/* check for not configured or checkstopped */
1193 	if (!aq->config || aq->chkstop) {
1194 		rc = false;
1195 		goto unlock_and_out;
1196 	}
1197 
1198 	/* device state needs to be ok */
1199 	if (aq->dev_state != AP_DEV_STATE_OPERATING) {
1200 		rc = false;
1201 		goto unlock_and_out;
1202 	}
1203 
1204 	/* SE guest's queues additionally need to be bound */
1205 	if (ap_is_se_guest()) {
1206 		if (!ap_q_supported_in_se(aq)) {
1207 			rc = false;
1208 			goto unlock_and_out;
1209 		}
1210 		if (ap_q_needs_bind(aq) &&
1211 		    !(aq->se_bstate == AP_BS_Q_USABLE ||
1212 		      aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
1213 			rc = false;
1214 	}
1215 
1216 unlock_and_out:
1217 	spin_unlock_bh(&aq->lock);
1218 	return rc;
1219 }
1220 EXPORT_SYMBOL(ap_queue_usable);
1221 
1222 /**
1223  * ap_cancel_message(): Cancel a crypto request.
1224  * @aq: The AP device that has the message queued
1225  * @ap_msg: The message that is to be removed
1226  *
1227  * Cancel a crypto request. This is done by removing the request
1228  * from the device pending or request queue. Note that the
1229  * request stays on the AP queue. When it finishes the message
1230  * reply will be discarded because the psmid can't be found.
1231  */
ap_cancel_message(struct ap_queue * aq,struct ap_message * ap_msg)1232 void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
1233 {
1234 	struct ap_message *tmp;
1235 
1236 	spin_lock_bh(&aq->lock);
1237 	if (!list_empty(&ap_msg->list)) {
1238 		list_for_each_entry(tmp, &aq->pendingq, list)
1239 			if (tmp->psmid == ap_msg->psmid) {
1240 				aq->pendingq_count--;
1241 				goto found;
1242 			}
1243 		aq->requestq_count--;
1244 found:
1245 		list_del_init(&ap_msg->list);
1246 	}
1247 	spin_unlock_bh(&aq->lock);
1248 }
1249 EXPORT_SYMBOL(ap_cancel_message);
1250 
1251 /**
1252  * __ap_flush_queue(): Flush requests.
1253  * @aq: Pointer to the AP queue
1254  *
1255  * Flush all requests from the request/pending queue of an AP device.
1256  */
__ap_flush_queue(struct ap_queue * aq)1257 static void __ap_flush_queue(struct ap_queue *aq)
1258 {
1259 	struct ap_message *ap_msg, *next;
1260 
1261 	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
1262 		list_del_init(&ap_msg->list);
1263 		aq->pendingq_count--;
1264 		ap_msg->rc = -EAGAIN;
1265 		ap_msg->receive(aq, ap_msg, NULL);
1266 	}
1267 	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
1268 		list_del_init(&ap_msg->list);
1269 		aq->requestq_count--;
1270 		ap_msg->rc = -EAGAIN;
1271 		ap_msg->receive(aq, ap_msg, NULL);
1272 	}
1273 	aq->queue_count = 0;
1274 }
1275 
/*
 * ap_flush_queue(): Lock the queue and flush all pending and queued
 * requests; each flushed message is completed with rc -EAGAIN.
 * @aq: Pointer to the AP queue
 */
void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);
1283 
/*
 * ap_queue_prepare_remove(): Prepare the queue device for removal.
 * @aq: Pointer to the AP queue
 *
 * Flushes all outstanding requests, moves the device state to
 * SHUTDOWN and makes sure the request timeout timer is not running.
 */
void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}
1294 
/*
 * ap_queue_remove(): Final removal step for a queue device.
 * @aq: Pointer to the AP queue
 *
 * Expects ap_queue_prepare_remove() to have run before.
 */
void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * all messages have been flushed and the device state
	 * is SHUTDOWN. Now reset with zero which also clears
	 * the irq registration and move the device state
	 * to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}
1308 
/*
 * _ap_queue_init_state(): (Re-)initialize the queue state.
 * @aq: Pointer to the AP queue
 *
 * Caller must hold aq->lock (see ap_queue_init_state()). Moves the
 * device to OPERATING, restarts the state machine at RESET_START,
 * invalidates the association index and triggers a POLL event.
 */
void _ap_queue_init_state(struct ap_queue *aq)
{
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
}
1317 
/*
 * ap_queue_init_state(): Lock the queue and (re-)initialize its state.
 * @aq: Pointer to the AP queue
 */
void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	_ap_queue_init_state(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
1325