/* SPDX-License-Identifier: GPL-2.0 */
/*
 * zfcp device driver
 *
 * Header file for zfcp qdio interface
 *
 * Copyright IBM Corp. 2010
 */

#ifndef ZFCP_QDIO_H
#define ZFCP_QDIO_H

#include <asm/qdio.h>

#define ZFCP_QDIO_SBALE_LEN	PAGE_SIZE

/* Max SBALS for chaining */
#define ZFCP_QDIO_MAX_SBALS_PER_REQ	36

/**
 * struct zfcp_qdio - basic qdio data structure
 * @res_q: response queue
 * @req_q: request queue
 * @req_q_idx: index of next free buffer
 * @req_q_free: number of free buffers in queue
 * @stat_lock: lock to protect req_q_util and req_q_time
 * @req_q_lock: lock to serialize access to request queue
 * @req_q_time: time of last fill level change
 * @req_q_util: accumulated time-weighted fill level, used for accounting
 * @req_q_full: number of queue full incidents
 * @req_q_wq: used to wait for SBAL availability
 * @adapter: adapter used in conjunction with this qdio structure
 * @max_sbale_per_sbal: qdio limit on the number of SBALEs per SBAL
 * @max_sbale_per_req: qdio limit on the number of SBALEs per request
 */
struct zfcp_qdio {
	struct qdio_buffer	*res_q[QDIO_MAX_BUFFERS_PER_Q];
	struct qdio_buffer	*req_q[QDIO_MAX_BUFFERS_PER_Q];
	u8			req_q_idx;
	atomic_t		req_q_free;
	spinlock_t		stat_lock;
	spinlock_t		req_q_lock;
	unsigned long long	req_q_time;
	u64			req_q_util;
	atomic_t		req_q_full;
	wait_queue_head_t	req_q_wq;
	struct zfcp_adapter	*adapter;
	u16			max_sbale_per_sbal;
	u16			max_sbale_per_req;
};

/**
 * struct zfcp_qdio_req - qdio queue related values for a request
 * @sbtype: sbal type flags for sbale 0
 * @sbal_number: number of sbals used by this request
 * @sbal_first: first sbal for this request
 * @sbal_last: last sbal for this request
 * @sbal_limit: last possible sbal for this request
 * @sbale_curr: index of the current sbale within the last sbal
 * @qdio_outb_usage: usage of outbound queue
 */
struct zfcp_qdio_req {
	u8	sbtype;
	u8	sbal_number;
	u8	sbal_first;
	u8	sbal_last;
	u8	sbal_limit;
	u8	sbale_curr;
	u16	qdio_outb_usage;
};

/**
 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	return &qdio->req_q[q_req->sbal_last]->element[0];
}

/**
 * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
}

/**
 * zfcp_qdio_req_init - initialize qdio request
 * @qdio: request queue where to start putting the request
 * @q_req: the qdio request to start
 * @req_id: The request id
 * @sbtype: type flags to set for all sbals
 * @data: First data block
 * @len: Length of first data block
 *
 * This is the start of putting the request into the queue; the last
 * step is passing the request to zfcp_qdio_send. The request queue
 * lock must be held during the whole process from init to send.
 */
static inline
void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			unsigned long req_id, u8 sbtype, void *data, u32 len)
{
	struct qdio_buffer_element *sbale;
	int count = min(atomic_read(&qdio->req_q_free),
			ZFCP_QDIO_MAX_SBALS_PER_REQ);

	q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
	q_req->sbal_number = 1;
	q_req->sbtype = sbtype;
	q_req->sbale_curr = 1;
	q_req->sbal_limit = (q_req->sbal_first + count - 1)
					% QDIO_MAX_BUFFERS_PER_Q;

	/* sbale 0 carries the request id and the command flags */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->addr = req_id;
	sbale->eflags = 0;
	sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;

	if (unlikely(!data))
		return;
	/* the first data block goes into sbale 1 */
	sbale++;
	sbale->addr = virt_to_phys(data);
	sbale->length = len;
}

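/*
 * Typical calling sequence, as an illustrative sketch only (not a definitive
 * reference): zfcp_qdio_send() is implemented in zfcp_qdio.c, and req_id,
 * payload, payload_len and retval are placeholder names for this sketch.
 *
 *	spin_lock_irq(&qdio->req_q_lock);
 *	zfcp_qdio_req_init(qdio, &q_req, req_id, SBAL_SFLAGS0_TYPE_WRITE,
 *			   payload, payload_len);
 *	zfcp_qdio_set_sbale_last(qdio, &q_req);
 *	retval = zfcp_qdio_send(qdio, &q_req);
 *	spin_unlock_irq(&qdio->req_q_lock);
 */
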
/**
 * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @data: pointer to data
 * @len: length of data
 *
 * This is only required for single sbal requests; calling it when
 * wrapping around to the next sbal is a bug.
 */
static inline
void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			 void *data, u32 len)
{
	struct qdio_buffer_element *sbale;

	BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
	q_req->sbale_curr++;
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->addr = virt_to_phys(data);
	sbale->length = len;
}

/**
 * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 */
static inline
void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
			      struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
}

/**
 * zfcp_qdio_sg_one_sbale - check if one sbale is enough for sg data
 * @sg: The scatterlist where to check the data size
 *
 * Returns: 1 when one sbale is enough for the data in the scatterlist,
 *	    0 if not.
 */
static inline
int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
{
	return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
}

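/*
 * A caller would typically use this check to decide whether scatterlist data
 * can be placed directly into single SBALEs of the current SBAL or whether
 * full SBAL chaining is needed. The snippet below is a hedged sketch of that
 * decision, not the exact FSF-layer code; zfcp_qdio_sbals_from_sg() is
 * implemented in zfcp_qdio.c.
 *
 *	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
 *		zfcp_qdio_fill_next(qdio, &q_req, sg_virt(sg_req),
 *				    sg_req->length);
 *		zfcp_qdio_fill_next(qdio, &q_req, sg_virt(sg_resp),
 *				    sg_resp->length);
 *	} else if (zfcp_qdio_sbals_from_sg(qdio, &q_req, sg_req) ||
 *		   zfcp_qdio_sbals_from_sg(qdio, &q_req, sg_resp)) {
 *		return -EIO;
 *	}
 */
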
/**
 * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 */
static inline
void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio,
				  struct zfcp_qdio_req *q_req)
{
	q_req->sbale_curr = qdio->max_sbale_per_sbal - 1;
}

/**
 * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @max_sbals: maximum number of SBALs allowed
 */
static inline
void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
			  struct zfcp_qdio_req *q_req, int max_sbals)
{
	int count = min(atomic_read(&qdio->req_q_free), max_sbals);

	q_req->sbal_limit = (q_req->sbal_first + count - 1) %
				QDIO_MAX_BUFFERS_PER_Q;
}

/**
 * zfcp_qdio_set_data_div - set data division count
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @count: The data division count
 */
static inline
void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
			    struct zfcp_qdio_req *q_req, u32 count)
{
	struct qdio_buffer_element *sbale;

	sbale = qdio->req_q[q_req->sbal_first]->element;
	sbale->length = count;
}

/**
 * zfcp_qdio_real_bytes - count bytes used
 * @sg: pointer to struct scatterlist
 *
 * Returns: the total number of bytes in all entries of the scatterlist
 */
static inline
unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg)
{
	unsigned int real_bytes = 0;

	for (; sg; sg = sg_next(sg))
		real_bytes += sg->length;

	return real_bytes;
}

/**
 * zfcp_qdio_set_scount - set SBAL count value
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 */
static inline
void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	sbale = qdio->req_q[q_req->sbal_first]->element;
	sbale->scount = q_req->sbal_number - 1;
}

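/*
 * For a multi-SBAL request built from scatterlists, the division count,
 * last-entry flag and SBAL count are set on the first SBAL once all data has
 * been mapped. A hedged sketch of that pattern (assuming the request
 * scatterlist sg_req has already been mapped with zfcp_qdio_sbals_from_sg(),
 * which lives in zfcp_qdio.c; req_len is a placeholder name):
 *
 *	req_len = zfcp_qdio_real_bytes(sg_req);
 *	zfcp_qdio_set_data_div(qdio, &q_req, sg_nents(sg_req));
 *	zfcp_qdio_set_sbale_last(qdio, &q_req);
 *	zfcp_qdio_set_scount(qdio, &q_req);
 */
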
#endif /* ZFCP_QDIO_H */