// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016, 2023
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define pr_fmt(fmt) "ap: " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/ap.h>

#include "ap_bus.h"
#include "ap_debug.h"

EXPORT_TRACEPOINT_SYMBOL(s390_ap_nqap);
EXPORT_TRACEPOINT_SYMBOL(s390_ap_dqap);

static void __ap_flush_queue(struct ap_queue *aq);

/*
 * some AP queue helper functions
 */

static inline bool ap_q_supported_in_se(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
}

static inline bool ap_q_supports_bind(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
}

static inline bool ap_q_supports_assoc(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11;
}

static inline bool ap_q_needs_bind(struct ap_queue *aq)
{
	return ap_q_supports_bind(aq) && ap_sb_available();
}

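/*
 * Illustrative sketch: these predicates gate the SE (secure execution)
 * guest paths, e.g. in ap_queue_usable() further below:
 *
 *	if (ap_is_se_guest()) {
 *		if (!ap_q_supported_in_se(aq))
 *			rc = false;
 *		else if (ap_q_needs_bind(aq) &&
 *			 aq->se_bstate != AP_BS_Q_USABLE &&
 *			 aq->se_bstate != AP_BS_Q_USABLE_NO_SECURE_KEY)
 *			rc = false;
 *	}
 */
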
/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on an AP queue via ap_aqic(). Based on the return
 * value, the caller waits a while and tests the AP queue whether interrupts
 * have been switched on using ap_test_queue().
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	union ap_qirq_ctrl qirqctrl = { .value = 0 };
	struct ap_queue_status status;

	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}

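/*
 * Usage sketch (mirrors ap_sm_read() below): callers fetch the local
 * summary indicator via ap_airq_ptr() and only then try to enable irqs:
 *
 *	void *lsi_ptr = ap_airq_ptr();
 *
 *	if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
 *		aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
 */
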
/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @msglen: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
	  int special)
{
	struct ap_queue_status status;

	if (special)
		qid |= 0x400000UL;

	status = ap_nqap(qid, psmid, msg, msglen);

	trace_s390_ap_nqap(AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			   status.value, psmid);

	return status;
}

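/*
 * Example (sketch): ap_sm_write() below passes the AP_MSG_FLAG_SPECIAL
 * flag through, which makes __ap_send() set the special bit 0x400000
 * in the qid before issuing NQAP:
 *
 *	status = __ap_send(qid, ap_msg->psmid,
 *			   ap_msg->msg, ap_msg->len,
 *			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
 */
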
/* State machine definitions and helpers */

static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the AP queue status from the last DQAP invocation.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * DQAP loop until response code and resgr0 indicate that
	 * the msg is totally received. As we use the very same buffer
	 * the msg is overwritten with each invocation. That's intended
	 * and the receiver of the msg is informed with a msg rc code
	 * of EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	trace_s390_ap_dqap(AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
			   status.value, aq->reply->psmid);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     aq->reply->msg, aq->reply->len, false);
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
			 aq->pendingq_count, aq->requestq_count);
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or
 * AP_SM_WAIT_HIGH_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return status.irq_enabled ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* Check and maybe enable irq support (again) on this queue */
	if (!status.irq_enabled && status.queue_empty) {
		void *lsi_ptr = ap_airq_ptr();

		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
			return AP_SM_WAIT_AGAIN;
		}
	}
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT,
 * AP_SM_WAIT_HIGH_TIMEOUT or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg->msg, ap_msg->len, false);
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->hwinfo.qd + 1) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return status.irq_enabled ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns the more urgent of the two wait indications from ap_sm_read()
 * and ap_sm_write().
 */
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
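	/*
	 * min() picks the more urgent of the two wait hints: enum
	 * ap_sm_wait (see ap_bus.h) is ordered from most urgent
	 * (AP_SM_WAIT_AGAIN) to least urgent (AP_SM_WAIT_NONE).
	 */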
	return min(ap_sm_read(aq), ap_sm_write(aq));
}

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command to an AP queue.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid, aq->rapq_fbit);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->rapq_fbit = 0;
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	void *lsi_ptr;

	/* Get the status with TAPQ */
	status = ap_test_queue(aq->qid, 1, &hwinfo);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->se_bstate = hwinfo.bs;
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_assoc_wait(): Test queue for completion of a pending
 *	association request.
 * @aq: pointer to the AP queue
 */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* update queue's SE bind state */
	aq->se_bstate = hwinfo.bs;

	/* check bs bits */
	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		/* association is through */
		aq->sm_state = AP_SM_STATE_IDLE;
		pr_debug("queue 0x%02x.%04x associated with %u\n",
			 AP_QID_CARD(aq->qid),
			 AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* reset from 'outside' happened or no idea at all */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/*
 * AP state machine jump table
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};

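/*
 * ap_sm_event(): Dispatch an event to the AP queue state machine.
 * The jump table above is indexed by the current sm_state (rows) and
 * the incoming event (columns); events are only dispatched while the
 * queue is configured, not checkstopped and past the UNINITIATED
 * device state.
 */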
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->config && !aq->chkstop &&
	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	else
		return AP_SM_WAIT_NONE;
}

enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}

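/*
 * Usage sketch (mirrors ap_queue_message() below): callers drive the
 * state machine under the queue lock and hand the resulting wait hint
 * to ap_wait():
 *
 *	spin_lock_bh(&aq->lock);
 *	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
 *	spin_unlock_bh(&aq->lock);
 */
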
/*
 * AP queue related attributes.
 */
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	bool valid = false;
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
		req_cnt = aq->total_request_count;
		valid = true;
	}
	spin_unlock_bh(&aq->lock);

	if (valid)
		return sysfs_emit(buf, "%llu\n", req_cnt);
	else
		return sysfs_emit(buf, "-\n");
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = sysfs_emit(buf, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = sysfs_emit(buf, "Reset Timer armed.\n");
		break;
	default:
		rc = sysfs_emit(buf, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);

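/*
 * Example (sketch, sysfs path assumed): any write to the reset attribute
 * flushes the queue and restarts the state machine, e.g. from a shell:
 *
 *	echo 1 > /sys/bus/ap/devices/<xx.yyyy>/reset
 */
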
static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
	} else {
		status = ap_tapq(aq->qid, NULL);
		if (status.irq_enabled)
			rc = sysfs_emit(buf, "Interrupts enabled.\n");
		else
			rc = sysfs_emit(buf, "Interrupts disabled.\n");
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

static ssize_t chkstop_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(chkstop);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
}

static DEVICE_ATTR_RO(ap_functions);

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_device *ap_dev = &aq->ap_dev;
	int rc;

	device_lock(dev);
	if (ap_dev->driver_override)
		rc = sysfs_emit(buf, "%s\n", ap_dev->driver_override);
	else
		rc = sysfs_emit(buf, "\n");
	device_unlock(dev);

	return rc;
}

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_device *ap_dev = &aq->ap_dev;
	int rc = -EINVAL;
	bool old_value;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	/* Do not allow driver override if apmask/aqmask is in use */
	if (ap_apmask_aqmask_in_use)
		goto out;

	old_value = ap_dev->driver_override ? true : false;
	rc = driver_set_override(dev, &ap_dev->driver_override, buf, count);
	if (rc)
		goto out;
	if (old_value && !ap_dev->driver_override)
		--ap_driver_override_ctr;
	else if (!old_value && ap_dev->driver_override)
		++ap_driver_override_ctr;

	rc = count;

out:
	mutex_unlock(&ap_attr_mutex);
	return rc;
}

static DEVICE_ATTR_RW(driver_override);

#ifdef CONFIG_AP_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return sysfs_emit(buf, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return sysfs_emit(buf, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return sysfs_emit(buf, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return sysfs_emit(buf, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return sysfs_emit(buf, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return sysfs_emit(buf, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
	default:
		return sysfs_emit(buf, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
	&dev_attr_driver_override.attr,
#ifdef CONFIG_AP_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}

static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (!value) {
		/* Unbind. Set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		_ap_queue_init_state(aq);
		rc = count;
		goto out;
	}

	/* Bind. Check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* Update BS state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
		AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* Check SM state */
	if (aq->sm_state < AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* invoke BAPQ */
	status = ap_bapq(aq->qid);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->assoc_idx = ASSOC_IDX_INVALID;

	/* verify SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->se_bstate = hwinfo.bs;
	if (!(hwinfo.bs == AP_BS_Q_USABLE ||
	      hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
		AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	/* SE bind was successful */
	AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
		    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_bind);

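/*
 * Example (sketch, sysfs path assumed): in an SE guest, binding and
 * unbinding a queue is driven through this attribute:
 *
 *	echo 1 > /sys/bus/ap/devices/<xx.yyyy>/se_bind	(bind via BAPQ)
 *	echo 0 > /sys/bus/ap/devices/<xx.yyyy>/se_bind	(unbind via RAPQ
 *							 with F bit set)
 */
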
static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
			 status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}

static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index must be a valid number < ASSOC_IDX_INVALID */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	/* check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
		AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* check SM state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		break;
	default:
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_associate);

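/*
 * Example (sketch, sysfs path assumed): association is triggered by
 * writing an association index; progress can be read back:
 *
 *	echo 5 > /sys/bus/ap/devices/<xx.yyyy>/se_associate
 *	cat /sys/bus/ap/devices/<xx.yyyy>/se_associate
 */
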
static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};

static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->card = ac;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = ac->ap_dev.device_type;
	/* in SE environment add bind/associate attributes group */
	if (ap_is_se_guest() && ap_q_supported_in_se(aq))
		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
	aq->qid = qid;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}

void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 *
 * Returns 0 if the message was queued, or -ENODEV if the queue
 * is not in a device state to accept new messages.
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow to queue new messages if device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);

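/*
 * Usage sketch (illustrative, not from this file): a driver prepares an
 * ap_message with a receive callback before enqueueing it; field names
 * follow struct ap_message from ap_bus.h:
 *
 *	struct ap_message ap_msg;
 *
 *	ap_init_message(&ap_msg);
 *	ap_msg.msg = msgbuf;
 *	ap_msg.len = msglen;
 *	ap_msg.psmid = psmid;
 *	ap_msg.receive = my_receive_cb;
 *	rc = ap_queue_message(aq, &ap_msg);
 */
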
/**
 * ap_queue_usable(): Check if queue is usable just now.
 * @aq: The AP queue device to test for usability.
 * This function is intended for the scheduler to query if it makes
 * sense to enqueue a message into this AP queue device by calling
 * ap_queue_message(). The perspective is very short-term as the
 * state machine and device state(s) may change at any time.
 */
bool ap_queue_usable(struct ap_queue *aq)
{
	bool rc = true;

	spin_lock_bh(&aq->lock);

	/* check for not configured or checkstopped */
	if (!aq->config || aq->chkstop) {
		rc = false;
		goto unlock_and_out;
	}

	/* device state needs to be ok */
	if (aq->dev_state != AP_DEV_STATE_OPERATING) {
		rc = false;
		goto unlock_and_out;
	}

	/* SE guest's queues additionally need to be bound */
	if (ap_is_se_guest()) {
		if (!ap_q_supported_in_se(aq)) {
			rc = false;
			goto unlock_and_out;
		}
		if (ap_q_needs_bind(aq) &&
		    !(aq->se_bstate == AP_BS_Q_USABLE ||
		      aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
			rc = false;
	}

unlock_and_out:
	spin_unlock_bh(&aq->lock);
	return rc;
}
EXPORT_SYMBOL(ap_queue_usable);

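/*
 * Usage sketch (illustrative): a scheduler checks usability right
 * before enqueueing, accepting that the state may change at any time:
 *
 *	if (ap_queue_usable(aq))
 *		rc = ap_queue_message(aq, ap_msg);
 */
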
/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	timer_delete_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * all messages have been flushed and the device state
	 * is SHUTDOWN. Now reset with zero which also clears
	 * the irq registration and move the device state
	 * to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

void _ap_queue_init_state(struct ap_queue *aq)
{
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
}

void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	_ap_queue_init_state(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);