// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corp. 2002, 2020
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"

static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");

#define ZFCP_QDIO_REQUEST_RESCAN_MSECS	(MSEC_PER_SEC * 10)
#define ZFCP_QDIO_REQUEST_SCAN_MSECS	MSEC_PER_SEC

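/*
 * Report a QDIO problem on the adapter and trigger recovery: shut the
 * adapter down (after requesting hardware logging) for SLSB-state errors,
 * otherwise reopen it through error recovery.
 */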
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
				    unsigned int qdio_err)
{
	struct zfcp_adapter *adapter = qdio->adapter;

	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
		return;
	}
	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
}

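/*
 * Zero @cnt SBALs of @sbal, starting at index @first and wrapping around
 * the queue end.
 */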
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}

/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	now = get_tod_clock_monotonic();
	span = (now - qdio->req_q_time) >> 12;
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}

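/*
 * QDIO request (output) queue handler; any invocation by the QDIO layer is
 * treated as an error condition.
 */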
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int idx, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

	zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
}

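/*
 * Scan the request (output) queue for completed SBALs: zero them, update the
 * utilization statistics and the free count, and wake up waiters.  Re-arm
 * the request timer while SBALs are still in use.
 */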
static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, request_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	completed = qdio_inspect_output_queue(cdev, 0, &start, &error);
	if (completed > 0) {
		if (error) {
			zfcp_qdio_handler_error(qdio, "qdreqt1", error);
		} else {
			/* cleanup all SBALs being program-owned now */
			zfcp_qdio_zero_sbals(qdio->req_q, start, completed);

			spin_lock_irq(&qdio->stat_lock);
			zfcp_qdio_account(qdio);
			spin_unlock_irq(&qdio->stat_lock);
			atomic_add(completed, &qdio->req_q_free);
			wake_up(&qdio->req_q_wq);
		}
	}

	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_RESCAN_MSECS));
}

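/* Timer callback: trigger a rescan of the request queue via the tasklet. */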
static void zfcp_qdio_request_timer(struct timer_list *timer)
{
	struct zfcp_qdio *qdio = timer_container_of(qdio, timer,
						    request_timer);

	tasklet_schedule(&qdio->request_tasklet);
}

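/*
 * QDIO response (input) queue handler: on error, trace the affected SBALs
 * and start recovery; otherwise hand each returned SBAL to the FSF layer
 * and put it back on the input queue.
 */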
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int idx, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_adapter *adapter = qdio->adapter;
	int sbal_no, sbal_idx;

	if (unlikely(qdio_err)) {
		if (zfcp_adapter_multi_buffer_active(adapter)) {
			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
			struct qdio_buffer_element *sbale;
			u64 req_id;
			u8 scount;

			memset(pl, 0,
			       ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
			sbale = qdio->res_q[idx]->element;
			req_id = dma64_to_u64(sbale->addr);
			scount = min(sbale->scount + 1,
				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
				     /* incl. signaling SBAL */

			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
				sbal_idx = (idx + sbal_no) %
					QDIO_MAX_BUFFERS_PER_Q;
				pl[sbal_no] = qdio->res_q[sbal_idx];
			}
			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
		}
		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/*
	 * put SBALs back to response queue
	 */
	if (qdio_add_bufs_to_input_queue(cdev, 0, idx, count))
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}

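/*
 * Tasklet driven by the QDIO IRQ poll callback: kick the request tasklet if
 * request-queue SBALs are outstanding, process response (input) queue
 * completions, and reschedule itself if more work is already pending when
 * interrupts are re-enabled.
 */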
static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, irq_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		tasklet_schedule(&qdio->request_tasklet);

	/* Check the Response Queue: */
	completed = qdio_inspect_input_queue(cdev, 0, &start, &error);
	if (completed < 0)
		return;
	if (completed > 0)
		zfcp_qdio_int_resp(cdev, error, 0, start, completed,
				   (unsigned long) qdio);

	if (qdio_start_irq(cdev))
		/* More work pending: */
		tasklet_schedule(&qdio->irq_tasklet);
}

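/* QDIO IRQ poll callback: defer all processing to the irq_tasklet. */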
static void zfcp_qdio_poll(struct ccw_device *cdev, unsigned long data)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) data;

	tasklet_schedule(&qdio->irq_tasklet);
}

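/*
 * Close the current SBAL and chain to the next SBAL of the request, unless
 * the last allowed SBAL is already in use.  Returns the first SBALE of the
 * new SBAL, or NULL if no further SBAL may be used.
 */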
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}

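/*
 * Return the next free SBALE of the request, chaining to a new SBAL when
 * the current one is full; NULL if no further SBAL may be used.
 */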
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
		return zfcp_qdio_sbal_chain(qdio, q_req);
	q_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(qdio, q_req);
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero or -EINVAL on error
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}
		sbale->addr = u64_to_dma64(sg_phys(sg));
		sbale->length = sg->length;
	}
	return 0;
}

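/*
 * Wait condition for zfcp_qdio_sbal_get(): a free SBAL is available, or
 * waiting is pointless because the QDIO queues are down.
 */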
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
	if (atomic_read(&qdio->req_q_free) ||
	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return 1;
	return 0;
}

/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	if (ret > 0)
		return 0;

	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	return -EIO;
}

/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	int retval;
	u8 sbal_number = q_req->sbal_number;

	/*
	 * This should actually be a spin_lock_bh(stat_lock), to protect against
	 * Request Queue completion processing in tasklet context.
	 * But we can't do so (and are safe), as we always get called with IRQs
	 * disabled by spin_lock_irq[save](req_q_lock).
	 */
	lockdep_assert_irqs_disabled();
	spin_lock(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);

	atomic_sub(sbal_number, &qdio->req_q_free);

	retval = qdio_add_bufs_to_output_queue(qdio->adapter->ccw_device, 0,
					       q_req->sbal_first, sbal_number,
					       NULL);

	if (unlikely(retval)) {
		/* Failed to submit the IO, roll back our modifications. */
		atomic_add(sbal_number, &qdio->req_q_free);
		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
				     sbal_number);
		return retval;
	}

	if (atomic_read(&qdio->req_q_free) <= 2 * ZFCP_QDIO_MAX_SBALS_PER_REQ)
		tasklet_schedule(&qdio->request_tasklet);
	else
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_SCAN_MSECS));

	/* account for transferred buffers */
	qdio->req_q_idx += sbal_number;
	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

	return 0;
}

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	int ret;

	ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		return -ENOMEM;

	ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		goto free_req_q;

	init_waitqueue_head(&qdio->req_q_wq);

	ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
	if (ret)
		goto free_res_q;

	return 0;

free_res_q:
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	return ret;
}

/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/*
	 * Clear QDIOUP flag, thus qdio_add_bufs_to_output_queue() is not called
	 * during qdio_shutdown().
	 */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	wake_up(&qdio->req_q_wq);

	tasklet_disable(&qdio->irq_tasklet);
	tasklet_disable(&qdio->request_tasklet);
	timer_delete_sync(&qdio->request_timer);
	qdio_stop_irq(adapter->ccw_device);
	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}

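/**
 * zfcp_qdio_shost_update - update SCSI host limits from the QDIO limits
 * @adapter: adapter whose Scsi_Host, if already allocated, is updated
 * @qdio: QDIO instance providing the per-request SBALE limit
 */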
void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
			    const struct zfcp_qdio *const qdio)
{
	struct Scsi_Host *const shost = adapter->scsi_host;

	if (shost == NULL)
		return;

	shost->sg_tablesize = qdio->max_sbale_per_req;
	shost->max_sectors = qdio->max_sbale_per_req * 8;
}

/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer **input_sbals[1] = {qdio->res_q};
	struct qdio_buffer **output_sbals[1] = {qdio->req_q};
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data = {0};
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &qdio->adapter->status);

	init_data.q_format = QDIO_ZFCP_QFMT;
	init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
	if (enable_multibuffer)
		init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = zfcp_qdio_int_resp;
	init_data.output_handler = zfcp_qdio_int_req;
	init_data.irq_poll = zfcp_qdio_poll;
	init_data.int_parm = (unsigned long) qdio;
	init_data.input_sbal_addr_array = input_sbals;
	init_data.output_sbal_addr_array = output_sbals;

	if (qdio_establish(cdev, &init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
				&qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;
	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = 0;
	}

	if (qdio_add_bufs_to_input_queue(cdev, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	/* Enable processing for Request Queue completions: */
	tasklet_enable(&qdio->request_tasklet);
	/* Enable processing for QDIO interrupts: */
	tasklet_enable(&qdio->irq_tasklet);
	/* This results in a qdio_start_irq(): */
	tasklet_schedule(&qdio->irq_tasklet);

	zfcp_qdio_shost_update(adapter, qdio);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}

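/**
 * zfcp_qdio_destroy - free the QDIO resources of an adapter
 * @qdio: pointer to struct zfcp_qdio, may be NULL
 */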
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
	if (!qdio)
		return;

	tasklet_kill(&qdio->irq_tasklet);
	tasklet_kill(&qdio->request_tasklet);

	if (qdio->adapter->ccw_device)
		qdio_free(qdio->adapter->ccw_device);

	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	kfree(qdio);
}

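/**
 * zfcp_qdio_setup - allocate and initialize the QDIO data for an adapter
 * @adapter: adapter for which the zfcp_qdio structure is set up
 * Returns: 0 on success, -ENOMEM on allocation failure
 */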
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio *qdio;

	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
	if (!qdio)
		return -ENOMEM;

	qdio->adapter = adapter;

	if (zfcp_qdio_allocate(qdio)) {
		kfree(qdio);
		return -ENOMEM;
	}

	spin_lock_init(&qdio->req_q_lock);
	spin_lock_init(&qdio->stat_lock);
	timer_setup(&qdio->request_timer, zfcp_qdio_request_timer, 0);
	tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
	tasklet_setup(&qdio->request_tasklet, zfcp_qdio_request_tasklet);
	tasklet_disable(&qdio->irq_tasklet);
	tasklet_disable(&qdio->request_tasklet);

	adapter->qdio = qdio;
	return 0;
}

/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging.  This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
	int rc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
		return;

	rc = ccw_device_siosl(adapter->ccw_device);
	if (!rc)
		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
				&adapter->status);
}
599