// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corp. 2002, 2020
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"

static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");

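/*
 * Report a QDIO problem on the adapter and trigger recovery: shut the
 * adapter down on an SLSB state inconsistency, otherwise reopen it.
 */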
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
				    unsigned int qdio_err)
{
	struct zfcp_adapter *adapter = qdio->adapter;

	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
		return;
	}
	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
}

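/* zero out a (possibly wrapping) range of SBALs of a queue */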
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}

/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	now = get_tod_clock_monotonic();
	span = (now - qdio->req_q_time) >> 12;
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}

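/*
 * Completion handler for the request (output) queue: reclaims the SBALs
 * that the FCP channel has finished processing, updates the fill-level
 * statistics and the free counter, and wakes up waiters.
 */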
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int idx, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

	if (unlikely(qdio_err)) {
		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
		return;
	}

	/* clean up all SBALs that are program-owned now */
	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);

	spin_lock_irq(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock_irq(&qdio->stat_lock);
	atomic_add(count, &qdio->req_q_free);
	wake_up(&qdio->req_q_wq);
}

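/*
 * Completion handler for the response (input) queue: on error, collect the
 * affected SBALs for debug tracing and start recovery; otherwise look up
 * and complete the FSF requests signaled by each SBAL, then return the
 * buffers to the queue.
 */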
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int idx, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_adapter *adapter = qdio->adapter;
	int sbal_no, sbal_idx;

	if (unlikely(qdio_err)) {
		if (zfcp_adapter_multi_buffer_active(adapter)) {
			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
			struct qdio_buffer_element *sbale;
			u64 req_id;
			u8 scount;

			memset(pl, 0, sizeof(pl));
			sbale = qdio->res_q[idx]->element;
			req_id = sbale->addr;
			/* incl. signaling SBAL */
			scount = min(sbale->scount + 1,
				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);

			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
				sbal_idx = (idx + sbal_no) %
					QDIO_MAX_BUFFERS_PER_Q;
				pl[sbal_no] = qdio->res_q[sbal_idx];
			}
			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
		}
		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/*
	 * put SBALs back to response queue
	 */
	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}

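/*
 * Tasklet that drains the response queue after a QDIO interrupt; it
 * reschedules itself as long as qdio_start_irq() reports pending work.
 */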
static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, irq_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	/* Check the Response Queue, and kick off the Request Queue tasklet: */
	completed = qdio_get_next_buffers(cdev, 0, &start, &error);
	if (completed < 0)
		return;
	if (completed > 0)
		zfcp_qdio_int_resp(cdev, error, 0, start, completed,
				   (unsigned long) qdio);

	if (qdio_start_irq(cdev))
		/* More work pending: */
		tasklet_schedule(&qdio->irq_tasklet);
}

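/* QDIO interrupt callback; defers all work to the irq tasklet */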
static void zfcp_qdio_poll(struct ccw_device *cdev, unsigned long data)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) data;

	tasklet_schedule(&qdio->irq_tasklet);
}

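/*
 * Close the current SBAL and chain a new one to the request. Returns a
 * pointer to the first usable SBALE of the new SBAL, or NULL if the
 * request already occupies its last allowed SBAL.
 */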
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}

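/* get the next free SBALE of a request, chaining a new SBAL if necessary */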
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
		return zfcp_qdio_sbal_chain(qdio, q_req);
	q_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(qdio, q_req);
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero on success, or -EINVAL if the list would exceed the
 *          request's SBAL limit
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}
		sbale->addr = sg_phys(sg);
		sbale->length = sg->length;
	}
	return 0;
}

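/*
 * Wait condition for zfcp_qdio_sbal_get(): there is a free SBAL, or the
 * QDIO queues are down and waiting makes no sense.
 */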
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
	if (atomic_read(&qdio->req_q_free) ||
	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return 1;
	return 0;
}

/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
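 *
 * A minimal usage sketch, following the caller pattern of the request
 * senders in zfcp_fsf.c (error handling abbreviated):
 *
 *	spin_lock_irq(&qdio->req_q_lock);
 *	if (zfcp_qdio_sbal_get(qdio))
 *		goto out;
 *	... build the request into the free SBAL(s) ...
 *	retval = zfcp_qdio_send(qdio, &req->qdio_req);
 * out:
 *	spin_unlock_irq(&qdio->req_q_lock);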
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	if (ret > 0)
		return 0;

	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	return -EIO;
}

/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	int retval;
	u8 sbal_number = q_req->sbal_number;

	/*
	 * This should actually be a spin_lock_bh(stat_lock), to protect against
	 * zfcp_qdio_int_req() in tasklet context.
	 * But we can't do so (and are safe), as we always get called with IRQs
	 * disabled by spin_lock_irq[save](req_q_lock).
	 */
	lockdep_assert_irqs_disabled();
	spin_lock(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);

	atomic_sub(sbal_number, &qdio->req_q_free);

	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
			 q_req->sbal_first, sbal_number);

	if (unlikely(retval)) {
		/* Failed to submit the IO, roll back our modifications. */
		atomic_add(sbal_number, &qdio->req_q_free);
		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
				     sbal_number);
		return retval;
	}

	/* account for transferred buffers */
	qdio->req_q_idx += sbal_number;
	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

	return 0;
}

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	int ret;

	ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		return -ENOMEM;

	ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		goto free_req_q;

	init_waitqueue_head(&qdio->req_q_wq);

	ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
	if (ret)
		goto free_res_q;

	return 0;

free_res_q:
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	return ret;
}

/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to struct zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	wake_up(&qdio->req_q_wq);

	tasklet_disable(&qdio->irq_tasklet);
	qdio_stop_irq(adapter->ccw_device);
	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}

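/**
 * zfcp_qdio_shost_update - propagate QDIO limits to the SCSI host
 * @adapter: pointer to the adapter whose Scsi_Host is to be updated
 * @qdio: pointer to the struct zfcp_qdio holding the negotiated limits
 *
 * Sets the scatter-gather table size to the number of SBALEs usable per
 * request, and max_sectors accordingly (eight 512-byte sectors per 4 kB
 * SBALE). Does nothing while no SCSI host is allocated for the adapter.
 */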
void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
			    const struct zfcp_qdio *const qdio)
{
	struct Scsi_Host *const shost = adapter->scsi_host;

	if (shost == NULL)
		return;

	shost->sg_tablesize = qdio->max_sbale_per_req;
	shost->max_sectors = qdio->max_sbale_per_req * 8;
}

/**
 * zfcp_qdio_open - prepare and establish the QDIO queues for an adapter
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer **input_sbals[1] = {qdio->res_q};
	struct qdio_buffer **output_sbals[1] = {qdio->req_q};
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data = {0};
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &qdio->adapter->status);

	init_data.q_format = QDIO_ZFCP_QFMT;
	init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
	if (enable_multibuffer)
		init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = zfcp_qdio_int_resp;
	init_data.output_handler = zfcp_qdio_int_req;
	init_data.irq_poll = zfcp_qdio_poll;
	init_data.int_parm = (unsigned long) qdio;
	init_data.input_sbal_addr_array = input_sbals;
	init_data.output_sbal_addr_array = output_sbals;
	init_data.scan_threshold =
		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;

	if (qdio_establish(cdev, &init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
				&qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;
	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = 0;
	}

	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBAL / number of available SBALs */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	/* Enable processing for QDIO interrupts: */
	tasklet_enable(&qdio->irq_tasklet);
	/* This results in a qdio_start_irq(): */
	tasklet_schedule(&qdio->irq_tasklet);

	zfcp_qdio_shost_update(adapter, qdio);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}

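/**
 * zfcp_qdio_destroy - free the QDIO resources of an adapter
 * @qdio: pointer to struct zfcp_qdio, may be NULL
 *
 * Kills the irq tasklet, frees the qdio structures of the ccw device,
 * the queue buffers, and the zfcp_qdio object itself.
 */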
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
	if (!qdio)
		return;

	tasklet_kill(&qdio->irq_tasklet);

	if (qdio->adapter->ccw_device)
		qdio_free(qdio->adapter->ccw_device);

	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	kfree(qdio);
}

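/**
 * zfcp_qdio_setup - allocate and initialize the zfcp_qdio object of an adapter
 * @adapter: pointer to the adapter to attach the new zfcp_qdio object to
 *
 * The irq tasklet is set up in disabled state; zfcp_qdio_open() enables it.
 *
 * Returns: 0 on success, -ENOMEM if an allocation fails.
 */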
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio *qdio;

	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
	if (!qdio)
		return -ENOMEM;

	qdio->adapter = adapter;

	if (zfcp_qdio_allocate(qdio)) {
		kfree(qdio);
		return -ENOMEM;
	}

	spin_lock_init(&qdio->req_q_lock);
	spin_lock_init(&qdio->stat_lock);
	tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
	tasklet_disable(&qdio->irq_tasklet);

	adapter->qdio = qdio;
	return 0;
}

/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging.  This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
	int rc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
		return;

	rc = ccw_device_siosl(adapter->ccw_device);
	if (!rc)
		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
				&adapter->status);
}
553