/*
 * iSER transport for the Open iSCSI Initiator & iSER transport internals
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 Mike Christie
 * based on code maintained by open-iscsi@googlegroups.com
 *
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ISCSI_ISER_H__
#define __ISCSI_ISER_H__

#include <linux/types.h>
#include <linux/net.h>
#include <linux/printk.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/mempool.h>
#include <linux/uio.h>

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>

#define DRV_NAME	"iser"
#define PFX		DRV_NAME ": "
#define DRV_VER		"1.6"

#define iser_dbg(fmt, arg...)				 \
	do {						 \
		if (unlikely(iser_debug_level > 2))	 \
			printk(KERN_DEBUG PFX "%s: " fmt,\
				__func__ , ## arg);	 \
	} while (0)

#define iser_warn(fmt, arg...)				\
	do {						\
		if (unlikely(iser_debug_level > 0))	\
			pr_warn(PFX "%s: " fmt,		\
				__func__ , ## arg);	\
	} while (0)

#define iser_info(fmt, arg...)				\
	do {						\
		if (unlikely(iser_debug_level > 1))	\
			pr_info(PFX "%s: " fmt,		\
				__func__ , ## arg);	\
	} while (0)

#define iser_err(fmt, arg...) \
	pr_err(PFX "%s: " fmt, __func__ , ## arg)

#define SHIFT_4K	12
#define SIZE_4K	(1ULL << SHIFT_4K)
#define MASK_4K	(~(SIZE_4K-1))

/* Default support is 512KB I/O size */
#define ISER_DEF_MAX_SECTORS		1024
#define ISCSI_ISER_DEF_SG_TABLESIZE	((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K)
/* Maximum support is 8MB I/O size */
#define ISCSI_ISER_MAX_SG_TABLESIZE	((16384 * 512) >> SHIFT_4K)
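/* i.e. 512 KB / 4 KB = 128 sg entries by default, 8 MB / 4 KB = 2048 at most */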

#define ISER_DEF_XMIT_CMDS_DEFAULT		512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
	#define ISER_DEF_XMIT_CMDS_MAX		ISCSI_DEF_XMIT_CMDS_MAX
#else
	#define ISER_DEF_XMIT_CMDS_MAX		ISER_DEF_XMIT_CMDS_DEFAULT
#endif
#define ISER_DEF_CMD_PER_LUN		ISER_DEF_XMIT_CMDS_MAX

/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
#define ISER_MAX_RX_MISC_PDUS		4 /* NOOP_IN(2), ASYNC_EVENT(2) */

#define ISER_MAX_TX_MISC_PDUS		6 /* NOOP_OUT(2), TEXT(1),         *
					   * SCSI_TMFUNC(2), LOGOUT(1) */

#define ISER_QP_MAX_RECV_DTOS		(ISER_DEF_XMIT_CMDS_MAX)

#define ISER_MIN_POSTED_RX		(ISER_DEF_XMIT_CMDS_MAX >> 2)

/* the max TX (send) WR supported by the iSER QP is defined by
 * max_send_wr = T * (1 + D) + C; D is how many inflight dataouts we expect
 * to have at max for a SCSI command. The tx posting & completion handling
 * code supports an -EAGAIN scheme where tx is suspended till the QP has room
 * for more send WRs. D=8 comes from 64K/8K.
 */

#define ISER_INFLIGHT_DATAOUTS		8

#define ISER_QP_MAX_REQ_DTOS		(ISER_DEF_XMIT_CMDS_MAX *    \
					(1 + ISER_INFLIGHT_DATAOUTS) + \
					ISER_MAX_TX_MISC_PDUS        + \
					ISER_MAX_RX_MISC_PDUS)
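/* e.g. with ISER_DEF_XMIT_CMDS_MAX = 512 (ISER_DEF_XMIT_CMDS_DEFAULT):
 * 512 * (1 + 8) + 6 + 4 = 4618 send WRs */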

/* Max registration work requests per command */
#define ISER_MAX_REG_WR_PER_CMD		5

/* For Signature we don't support DATAOUTs so no need to make room for them */
#define ISER_QP_SIG_MAX_REQ_DTOS	(ISER_DEF_XMIT_CMDS_MAX	*       \
					(1 + ISER_MAX_REG_WR_PER_CMD) + \
					ISER_MAX_TX_MISC_PDUS         + \
					ISER_MAX_RX_MISC_PDUS)

#define ISER_GET_MAX_XMIT_CMDS(send_wr) ((send_wr			\
					 - ISER_MAX_TX_MISC_PDUS	\
					 - ISER_MAX_RX_MISC_PDUS) /	\
					 (1 + ISER_INFLIGHT_DATAOUTS))

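/* Work completions polled per CQ batch and how often (in commands) a TX
 * work request is posted signaled */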
#define ISER_WC_BATCH_COUNT   16
#define ISER_SIGNAL_CMD_COUNT 32

#define ISER_VER			0x10
#define ISER_WSV			0x08
#define ISER_RSV			0x04

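/* Reserved wr_id values: fastreg/local-invalidate posts and the beacon
 * used to signal that all flush errors were drained */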
#define ISER_FASTREG_LI_WRID		0xffffffffffffffffULL
#define ISER_BEACON_WRID		0xfffffffffffffffeULL

/**
 * struct iser_hdr - iSER header
 *
 * @flags:        flags support (zbva, remote_inv)
 * @rsvd:         reserved
 * @write_stag:   write rkey
 * @write_va:     write virtual address
 * @read_stag:    read rkey
 * @read_va:      read virtual address
 */
struct iser_hdr {
	u8      flags;
	u8      rsvd[3];
	__be32  write_stag;
	__be64  write_va;
	__be32  read_stag;
	__be64  read_va;
} __attribute__((packed));


#define ISER_ZBVA_NOT_SUPPORTED		0x80
#define ISER_SEND_W_INV_NOT_SUPPORTED	0x40

struct iser_cm_hdr {
	u8      flags;
	u8      rsvd[3];
} __packed;

/* Constant PDU length calculations */
#define ISER_HEADERS_LEN  (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))

#define ISER_RECV_DATA_SEG_LEN	128
#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
#define ISER_RX_LOGIN_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)

/* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE		    64

enum iser_conn_state {
	ISER_CONN_INIT,		   /* descriptor allocated, no conn       */
	ISER_CONN_PENDING,	   /* in the process of being established */
	ISER_CONN_UP,		   /* up and running                      */
	ISER_CONN_TERMINATING,	   /* in the process of being terminated  */
	ISER_CONN_DOWN,		   /* shut down                           */
	ISER_CONN_STATES_NUM
};

enum iser_task_status {
	ISER_TASK_STATUS_INIT = 0,
	ISER_TASK_STATUS_STARTED,
	ISER_TASK_STATUS_COMPLETED
};

enum iser_data_dir {
	ISER_DIR_IN = 0,	   /* to initiator */
	ISER_DIR_OUT,		   /* from initiator */
	ISER_DIRS_NUM
};

/**
 * struct iser_data_buf - iSER data buffer
 *
 * @sg:           pointer to the sg list
 * @size:         num entries of this sg
 * @data_len:     total buffer byte len
 * @dma_nents:    returned by dma_map_sg
 * @orig_sg:      pointer to the original sg list (in case
 *                we used a copy)
 * @orig_size:    num entries of orig sg list
 */
struct iser_data_buf {
	struct scatterlist *sg;
	unsigned int       size;
	unsigned long      data_len;
	unsigned int       dma_nents;
	struct scatterlist *orig_sg;
	unsigned int       orig_size;
};

/* fwd declarations */
struct iser_device;
struct iscsi_iser_task;
struct iscsi_endpoint;
struct iser_reg_resources;

/**
 * struct iser_mem_reg - iSER memory registration info
 *
 * @sge:          memory region sg element
 * @rkey:         memory region remote key
 * @mem_h:        pointer to registration context (FMR/Fastreg)
 */
struct iser_mem_reg {
	struct ib_sge	 sge;
	u32		 rkey;
	void		*mem_h;
};

enum iser_desc_type {
	ISCSI_TX_CONTROL,
	ISCSI_TX_SCSI_COMMAND,
	ISCSI_TX_DATAOUT
};

/* Maximum number of work requests per task:
 * Data memory region local invalidate + fast registration
 * Protection memory region local invalidate + fast registration
 * Signature memory region local invalidate + fast registration
 * PDU send
 */
#define ISER_MAX_WRS 7

/**
 * struct iser_tx_desc - iSER TX descriptor (for send wr_id)
 *
 * @iser_header:   iser header
 * @iscsi_header:  iscsi header
 * @type:          command/control/dataout
 * @dma_addr:      header buffer dma_address
 * @tx_sg:         sg[0] points to iser/iscsi headers
 *                 sg[1] optionally points to either of immediate data,
 *                 unsolicited data-out or control
 * @num_sge:       number of sges used on this TX task
 * @mapped:        Is the task header mapped
 * @wr_idx:        Current WR index
 * @wrs:           Array of WRs per task
 * @data_reg:      Data buffer registration details
 * @prot_reg:      Protection buffer registration details
 * @sig_attrs:     Signature attributes
 */
struct iser_tx_desc {
	struct iser_hdr              iser_header;
	struct iscsi_hdr             iscsi_header;
	enum   iser_desc_type        type;
	u64		             dma_addr;
	struct ib_sge		     tx_sg[2];
	int                          num_sge;
	bool			     mapped;
	u8                           wr_idx;
	struct ib_send_wr            wrs[ISER_MAX_WRS];
	struct iser_mem_reg          data_reg;
	struct iser_mem_reg          prot_reg;
	struct ib_sig_attrs          sig_attrs;
};

#define ISER_RX_PAD_SIZE	(256 - (ISER_RX_PAYLOAD_SIZE + \
					sizeof(u64) + sizeof(struct ib_sge)))
/**
 * struct iser_rx_desc - iSER RX descriptor (for recv wr_id)
 *
 * @iser_header:   iser header
 * @iscsi_header:  iscsi header
 * @data:          received data segment
 * @dma_addr:      receive buffer dma address
 * @rx_sg:         ib_sge of receive buffer
 * @pad:           for sense data TODO: Modify to maximum sense length supported
 */
struct iser_rx_desc {
	struct iser_hdr              iser_header;
	struct iscsi_hdr             iscsi_header;
	char		             data[ISER_RECV_DATA_SEG_LEN];
	u64		             dma_addr;
	struct ib_sge		     rx_sg;
	char		             pad[ISER_RX_PAD_SIZE];
} __attribute__((packed));

struct iser_conn;
struct ib_conn;
struct iscsi_iser_task;

/**
 * struct iser_comp - iSER completion context
 *
 * @device:     pointer to device handle
 * @cq:         completion queue
 * @wcs:        work completion array
 * @tasklet:    Tasklet handle
 * @active_qps: Number of active QPs attached
 *              to completion context
 */
struct iser_comp {
	struct iser_device      *device;
	struct ib_cq		*cq;
	struct ib_wc		 wcs[ISER_WC_BATCH_COUNT];
	struct tasklet_struct	 tasklet;
	int                      active_qps;
};

/**
 * struct iser_reg_ops - Memory registration operations
 *     per-device registration schemes
 *
 * @alloc_reg_res:     Allocate registration resources
 * @free_reg_res:      Free registration resources
 * @reg_mem:           Register memory buffers
 * @unreg_mem:         Un-register memory buffers
 * @reg_desc_get:      Get a registration descriptor from pool
 * @reg_desc_put:      Put a registration descriptor back to pool
 */
struct iser_reg_ops {
	int            (*alloc_reg_res)(struct ib_conn *ib_conn,
					unsigned cmds_max,
					unsigned int size);
	void           (*free_reg_res)(struct ib_conn *ib_conn);
	int            (*reg_mem)(struct iscsi_iser_task *iser_task,
				  struct iser_data_buf *mem,
				  struct iser_reg_resources *rsc,
				  struct iser_mem_reg *reg);
	void           (*unreg_mem)(struct iscsi_iser_task *iser_task,
				    enum iser_data_dir cmd_dir);
	struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn);
	void           (*reg_desc_put)(struct ib_conn *ib_conn,
				       struct iser_fr_desc *desc);
};

/**
 * struct iser_device - iSER device handle
 *
 * @ib_device:     RDMA device
 * @pd:            Protection Domain for this device
 * @dev_attr:      Device attributes container
 * @mr:            Global DMA memory region
 * @event_handler: IB events handle routine
 * @ig_list:	   entry in devices list
 * @refcount:      Reference counter, dominated by open iser connections
 * @comps_used:    Number of completion contexts used, Min between online
 *                 cpus and device max completion vectors
 * @comps:         Dynamically allocated array of completion handlers
 * @reg_ops:       Registration ops
 */
struct iser_device {
	struct ib_device             *ib_device;
	struct ib_pd	             *pd;
	struct ib_device_attr	     dev_attr;
	struct ib_mr	             *mr;
	struct ib_event_handler      event_handler;
	struct list_head             ig_list;
	int                          refcount;
	int			     comps_used;
	struct iser_comp	     *comps;
	struct iser_reg_ops          *reg_ops;
};

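/* T10-PI check bits: guard, reference tag and application tag verification */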
#define ISER_CHECK_GUARD	0xc0
#define ISER_CHECK_REFTAG	0x0f
#define ISER_CHECK_APPTAG	0x30

/**
 * struct iser_reg_resources - Fast registration resources
 *
 * @mr:         memory region
 * @fmr_pool:   pool of fmrs
 * @frpl:       fast reg page list used by frwrs
 * @page_vec:   fast reg page list used by fmr pool
 * @mr_valid:   is mr valid indicator
 */
struct iser_reg_resources {
	union {
		struct ib_mr             *mr;
		struct ib_fmr_pool       *fmr_pool;
	};
	union {
		struct ib_fast_reg_page_list     *frpl;
		struct iser_page_vec             *page_vec;
	};
	u8				  mr_valid:1;
};

/**
 * struct iser_pi_context - Protection information context
 *
 * @rsc:             protection buffer registration resources
 * @sig_mr:          signature enable memory region
 * @sig_mr_valid:    is sig_mr valid indicator
 * @sig_protected:   is region protected indicator
 */
struct iser_pi_context {
	struct iser_reg_resources	rsc;
	struct ib_mr                   *sig_mr;
	u8                              sig_mr_valid:1;
	u8                              sig_protected:1;
};

/**
 * struct iser_fr_desc - Fast registration descriptor
 *
 * @list:           entry in connection fastreg pool
 * @rsc:            data buffer registration resources
 * @pi_ctx:         protection information context
 */
struct iser_fr_desc {
	struct list_head		  list;
	struct iser_reg_resources	  rsc;
	struct iser_pi_context		 *pi_ctx;
};

/**
 * struct iser_fr_pool - connection fast registration pool
 *
 * @list:                list of fastreg descriptors
 * @lock:                protects fmr/fastreg pool
 * @size:                size of the pool
 */
struct iser_fr_pool {
	struct list_head        list;
	spinlock_t              lock;
	int                     size;
};

/**
 * struct ib_conn - Infiniband related objects
 *
 * @cma_id:              rdma_cm connection manager handle
 * @qp:                  Connection Queue-pair
 * @post_recv_buf_count: post receive counter
 * @sig_count:           send work request signal count
 * @rx_wr:               receive work request for batch posts
 * @device:              reference to iser device
 * @comp:                iser completion context
 * @pi_support:          Indicate device T10-PI support
 * @beacon:              beacon send wr to signal all flush errors were drained
 * @flush_comp:          completes when all connection completions consumed
 * @fr_pool:             connection fast registration pool
 */
struct ib_conn {
	struct rdma_cm_id           *cma_id;
	struct ib_qp	            *qp;
	int                          post_recv_buf_count;
	u8                           sig_count;
	struct ib_recv_wr	     rx_wr[ISER_MIN_POSTED_RX];
	struct iser_device          *device;
	struct iser_comp	    *comp;
	bool			     pi_support;
	struct ib_send_wr	     beacon;
	struct completion	     flush_comp;
	struct iser_fr_pool          fr_pool;
};

/**
 * struct iser_conn - iSER connection context
 *
 * @ib_conn:          connection RDMA resources
 * @iscsi_conn:       link to matching iscsi connection
 * @ep:               transport handle
 * @state:            connection logical state
 * @qp_max_recv_dtos: maximum number of data outs, corresponds
 *                    to max number of post recvs
 * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
 * @min_posted_rx:    (qp_max_recv_dtos >> 2)
 * @max_cmds:         maximum cmds allowed for this connection
 * @name:             connection peer portal
 * @release_work:     deferred work for release job
 * @state_mutex:      protects iser connection state
 * @stop_completion:  conn_stop completion
 * @ib_completion:    RDMA cleanup completion
 * @up_completion:    connection establishment completed
 *                    (state is ISER_CONN_UP)
 * @conn_list:        entry in ig conn list
 * @login_buf:        login data buffer (stores login parameters)
 * @login_req_buf:    login request buffer
 * @login_req_dma:    login request buffer dma address
 * @login_resp_buf:   login response buffer
 * @login_resp_dma:   login response buffer dma address
 * @rx_desc_head:     head of rx_descs cyclic buffer
 * @rx_descs:         rx buffers array (cyclic buffer)
 * @num_rx_descs:     number of rx descriptors
 * @scsi_sg_tablesize: scsi host sg_tablesize
 * @scsi_max_sectors: scsi host max sectors
 */
struct iser_conn {
	struct ib_conn		     ib_conn;
	struct iscsi_conn	     *iscsi_conn;
	struct iscsi_endpoint	     *ep;
	enum iser_conn_state	     state;
	unsigned		     qp_max_recv_dtos;
	unsigned		     qp_max_recv_dtos_mask;
	unsigned		     min_posted_rx;
	u16                          max_cmds;
	char			     name[ISER_OBJECT_NAME_SIZE];
	struct work_struct	     release_work;
	struct mutex		     state_mutex;
	struct completion	     stop_completion;
	struct completion	     ib_completion;
	struct completion	     up_completion;
	struct list_head	     conn_list;

	char			     *login_buf;
	char			     *login_req_buf, *login_resp_buf;
	u64			     login_req_dma, login_resp_dma;
	unsigned int		     rx_desc_head;
	struct iser_rx_desc	     *rx_descs;
	u32                          num_rx_descs;
	unsigned short               scsi_sg_tablesize;
	unsigned int                 scsi_max_sectors;
};

/**
 * struct iscsi_iser_task - iser task context
 *
 * @desc:             TX descriptor
 * @iser_conn:        link to iser connection
 * @status:           current task status
 * @sc:               link to scsi command
 * @command_sent:     indicate if command was sent
 * @dir:              iser data direction
 * @rdma_reg:         task rdma registration desc
 * @data:             iser data buffer desc
 * @prot:             iser protection buffer desc
 */
struct iscsi_iser_task {
	struct iser_tx_desc          desc;
	struct iser_conn	     *iser_conn;
	enum iser_task_status	     status;
	struct scsi_cmnd	     *sc;
	int                          command_sent;
	int                          dir[ISER_DIRS_NUM];
	struct iser_mem_reg          rdma_reg[ISER_DIRS_NUM];
	struct iser_data_buf         data[ISER_DIRS_NUM];
	struct iser_data_buf         prot[ISER_DIRS_NUM];
};

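/**
 * struct iser_page_vec - page vector used for FMR registration
 *
 * @pages:     array of pages (as DMA addresses)
 * @length:    number of entries in @pages
 * @offset:    offset of the first byte within the first page
 * @data_size: total byte length of the described buffer
 */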
struct iser_page_vec {
	u64 *pages;
	int length;
	int offset;
	int data_size;
};

/**
 * struct iser_global - iSER global context
 *
 * @device_list_mutex:    protects device_list
 * @device_list:          iser devices global list
 * @connlist_mutex:       protects connlist
 * @connlist:             iser connections global list
 * @desc_cache:           kmem cache for tx dataout
 */
struct iser_global {
	struct mutex      device_list_mutex;
	struct list_head  device_list;
	struct mutex      connlist_mutex;
	struct list_head  connlist;
	struct kmem_cache *desc_cache;
};

extern struct iser_global ig;
extern int iser_debug_level;
extern bool iser_pi_enable;
extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;

int iser_assign_reg_ops(struct iser_device *device);

int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr);

void iscsi_iser_recv(struct iscsi_conn *conn,
		     struct iscsi_hdr *hdr,
		     char *rx_data,
		     int rx_data_len);

void iser_conn_init(struct iser_conn *iser_conn);

void iser_conn_release(struct iser_conn *iser_conn);

int iser_conn_terminate(struct iser_conn *iser_conn);

void iser_release_work(struct work_struct *work);

void iser_rcv_completion(struct iser_rx_desc *desc,
			 unsigned long dto_xfer_len,
			 struct ib_conn *ib_conn);

void iser_snd_completion(struct iser_tx_desc *desc,
			 struct ib_conn *ib_conn);

void iser_task_rdma_init(struct iscsi_iser_task *task);

void iser_task_rdma_finalize(struct iscsi_iser_task *task);

void iser_free_rx_descriptors(struct iser_conn *iser_conn);

void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *mem,
				     enum iser_data_dir cmd_dir);

int iser_reg_rdma_mem(struct iscsi_iser_task *task,
		      enum iser_data_dir dir);
void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
			 enum iser_data_dir dir);

int  iser_connect(struct iser_conn *iser_conn,
		  struct sockaddr *src_addr,
		  struct sockaddr *dst_addr,
		  int non_blocking);

void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir);
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir);

int  iser_post_recvl(struct iser_conn *iser_conn);
int  iser_post_recvm(struct iser_conn *iser_conn, int count);
int  iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		    bool signal);

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir);

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir);

int  iser_initialize_task_headers(struct iscsi_task *task,
			struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session);
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
			unsigned cmds_max,
			unsigned int size);
void iser_free_fmr_pool(struct ib_conn *ib_conn);
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
			    unsigned cmds_max,
			    unsigned int size);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector);
struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn);
void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
		     struct iser_fr_desc *desc);
struct iser_fr_desc *
iser_reg_desc_get_fmr(struct ib_conn *ib_conn);
void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
		      struct iser_fr_desc *desc);

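/**
 * iser_tx_next_wr - return the next free send work request of a TX descriptor
 * @tx_desc: iSER TX descriptor
 *
 * Takes the next unused entry in the descriptor's wrs[] array, chains it
 * after the previously used one and advances the WR index.
 */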
static inline struct ib_send_wr *
iser_tx_next_wr(struct iser_tx_desc *tx_desc)
{
	struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx];
	struct ib_send_wr *last_wr;

	if (tx_desc->wr_idx) {
		last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1];
		last_wr->next = cur_wr;
	}
	tx_desc->wr_idx++;

	return cur_wr;
}

#endif