/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Definitions for SMC Connections, Link Groups and Links
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#ifndef _SMC_CORE_H
#define _SMC_CORE_H

#include <linux/atomic.h>
#include <linux/smc.h>
#include <linux/pci.h>
#include <rdma/ib_verbs.h>
#include <net/genetlink.h>
#include <net/smc.h>

#include "smc.h"
#include "smc_ib.h"
#include "smc_clc.h"

#define SMC_RMBS_PER_LGR_MAX	255	/* max. # of RMBs per link group */
#define SMC_CONN_PER_LGR_MIN	16	/* min. # of connections per link group */
#define SMC_CONN_PER_LGR_MAX	255	/* max. # of connections per link group,
					 * also the default value for SMC-R v1 and v2.0
					 */
#define SMC_CONN_PER_LGR_PREFER	255	/* preferred # of connections per link group
					 * used in SMC-R v2.1 and later negotiation;
					 * vendors or distributions may set it to any
					 * value between 16 and 255 as needed.
					 */
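/* Editor's note (illustrative, not part of the original header): with SMC-R
 * v2.1 the effective per-lgr connection limit is the outcome of the CLC
 * handshake negotiation mentioned above; each side advertises its preferred
 * value (SMC_CONN_PER_LGR_PREFER here), typically the smaller of the two
 * preferences is used, and the result ends up in smc_link_group::max_conns.
 */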

struct smc_lgr_list {			/* list of link group definition */
	struct list_head	list;
	spinlock_t		lock;	/* protects list of link groups */
	u32			num;	/* unique link group number */
};

enum smc_lgr_role {		/* possible roles of a link group */
	SMC_CLNT,	/* client */
	SMC_SERV	/* server */
};

enum smc_link_state {			/* possible states of a link */
	SMC_LNK_UNUSED,		/* link is unused */
	SMC_LNK_INACTIVE,	/* link is inactive */
	SMC_LNK_ACTIVATING,	/* link is being activated */
	SMC_LNK_ACTIVE,		/* link is active */
};

#define SMC_WR_BUF_SIZE		48	/* size of work request buffer */
#define SMC_WR_BUF_V2_SIZE	8192	/* size of v2 work request buffer */

struct smc_wr_buf {
	u8	raw[SMC_WR_BUF_SIZE];
};

struct smc_wr_v2_buf {
	u8	raw[SMC_WR_BUF_V2_SIZE];
};

#define SMC_WR_REG_MR_WAIT_TIME	(5 * HZ)	/* wait time for ib_wr_reg_mr result */

enum smc_wr_reg_state {
	POSTED,		/* ib_wr_reg_mr request posted */
	CONFIRMED,	/* ib_wr_reg_mr response: successful */
	FAILED		/* ib_wr_reg_mr response: failure */
};

struct smc_rdma_sge {				/* sges for RDMA writes */
	struct ib_sge	wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE];
};

#define SMC_MAX_RDMA_WRITES	2	/* max. # of RDMA writes per
					 * message send
					 */

struct smc_rdma_sges {				/* sges per message send */
	struct smc_rdma_sge	tx_rdma_sge[SMC_MAX_RDMA_WRITES];
};

struct smc_rdma_wr {				/* work requests per message
						 * send
						 */
	struct ib_rdma_wr	wr_tx_rdma[SMC_MAX_RDMA_WRITES];
};

#define SMC_LGR_ID_SIZE		4

struct smc_link {
	struct smc_ib_device	*smcibdev;	/* ib-device */
	u8			ibport;		/* port - values 1 | 2 */
	struct ib_pd		*roce_pd;	/* IB protection domain,
						 * unique for every RoCE QP
						 */
	struct ib_qp		*roce_qp;	/* IB queue pair */
	struct ib_qp_attr	qp_attr;	/* IB queue pair attributes */

	struct smc_wr_buf	*wr_tx_bufs;	/* WR send payload buffers */
	struct ib_send_wr	*wr_tx_ibs;	/* WR send meta data */
	struct ib_sge		*wr_tx_sges;	/* WR send gather meta data */
	struct smc_rdma_sges	*wr_tx_rdma_sges; /* RDMA WRITE gather meta data */
	struct smc_rdma_wr	*wr_tx_rdmas;	/* WR RDMA WRITE */
	struct smc_wr_tx_pend	*wr_tx_pends;	/* WR send waiting for CQE */
	struct completion	*wr_tx_compl;	/* WR send CQE completion */
	/* above send vectors have wr_tx_cnt elements and use the same index */
	struct ib_send_wr	*wr_tx_v2_ib;	/* WR send v2 meta data */
	struct ib_sge		*wr_tx_v2_sge;	/* WR send v2 gather meta data */
	struct smc_wr_tx_pend	*wr_tx_v2_pend;	/* WR send v2 waiting for CQE */
	dma_addr_t		wr_tx_dma_addr;	/* DMA address of wr_tx_bufs */
	dma_addr_t		wr_tx_v2_dma_addr; /* DMA address of v2 tx buf */
	atomic_long_t		wr_tx_id;	/* seq # of last sent WR */
	unsigned long		*wr_tx_mask;	/* bit mask of used indexes */
	u32			wr_tx_cnt;	/* number of WR send buffers */
	wait_queue_head_t	wr_tx_wait;	/* wait for free WR send buf */
	struct {
		struct percpu_ref	wr_tx_refs;
	} ____cacheline_aligned_in_smp;
	struct completion	tx_ref_comp;

	struct smc_wr_buf	*wr_rx_bufs;	/* WR recv payload buffers */
	struct ib_recv_wr	*wr_rx_ibs;	/* WR recv meta data */
	struct ib_sge		*wr_rx_sges;	/* WR recv scatter meta data */
	/* above three vectors have wr_rx_cnt elements and use the same index */
	dma_addr_t		wr_rx_dma_addr;	/* DMA address of wr_rx_bufs */
	dma_addr_t		wr_rx_v2_dma_addr; /* DMA address of v2 rx buf */
	u64			wr_rx_id;	/* seq # of last recv WR */
	u64			wr_rx_id_compl;	/* seq # of last completed WR */
	u32			wr_rx_cnt;	/* number of WR recv buffers */
	unsigned long		wr_rx_tstamp;	/* jiffies when last buf rx */
	wait_queue_head_t	wr_rx_empty_wait; /* wait for RQ empty */

	struct ib_reg_wr	wr_reg;		/* WR register memory region */
	wait_queue_head_t	wr_reg_wait;	/* wait for wr_reg result */
	struct {
		struct percpu_ref	wr_reg_refs;
	} ____cacheline_aligned_in_smp;
	struct completion	reg_ref_comp;
	enum smc_wr_reg_state	wr_reg_state;	/* state of wr_reg request */

	u8			gid[SMC_GID_SIZE]; /* gid matching used vlan id */
	u8			sgid_index;	/* gid index for vlan id */
	u32			peer_qpn;	/* QP number of peer */
	enum ib_mtu		path_mtu;	/* used mtu */
	enum ib_mtu		peer_mtu;	/* mtu size of peer */
	u32			psn_initial;	/* QP tx initial packet seqno */
	u32			peer_psn;	/* QP rx initial packet seqno */
	u8			peer_mac[ETH_ALEN];	/* = gid[8:10||13:15] */
	u8			peer_gid[SMC_GID_SIZE];	/* gid of peer */
	u8			link_id;	/* unique # within link group */
	u8			link_uid[SMC_LGR_ID_SIZE]; /* unique lnk id */
	u8			peer_link_uid[SMC_LGR_ID_SIZE]; /* peer uid */
	u8			link_idx;	/* index in lgr link array */
	u8			link_is_asym;	/* is link asymmetric? */
	u8			clearing : 1;	/* link is being cleared */
	refcount_t		refcnt;		/* link reference count */
	struct smc_link_group	*lgr;		/* parent link group */
	struct work_struct	link_down_wrk;	/* wrk to bring link down */
	char			ibname[IB_DEVICE_NAME_MAX]; /* ib device name */
	int			ndev_ifidx;	/* network device ifindex */

	enum smc_link_state	state;		/* state of link */
	struct delayed_work	llc_testlink_wrk; /* testlink worker */
	struct completion	llc_testlink_resp; /* wait for rx of testlink */
	int			llc_testlink_time; /* testlink interval */
	atomic_t		conn_cnt;	/* connections on this link */
};

/* For now we just allow one parallel link per link group. The SMC protocol
 * allows more (up to 8).
 */
#define SMC_LINKS_PER_LGR_MAX	3
#define SMC_SINGLE_LINK		0
#define SMC_LINKS_ADD_LNK_MIN	1	/* min. # of links per link group */
#define SMC_LINKS_ADD_LNK_MAX	2	/* max. # of links per link group, also the
					 * default value for SMC-R v1.0 and v2.0
					 */
#define SMC_LINKS_PER_LGR_MAX_PREFER	2	/* preferred max. # of links per link group
					 * used in SMC-R v2.1 and later negotiation;
					 * vendors or distributions may set it to 1 or 2
					 * as needed.
					 */

/* tx/rx buffer list element for sndbufs list and rmbs list of a lgr */
struct smc_buf_desc {
	struct list_head	list;
	void			*cpu_addr;	/* virtual address of buffer */
	struct page		*pages;
	int			len;		/* length of buffer */
	u32			used;		/* currently used / unused */
	union {
		struct { /* SMC-R */
			struct sg_table	sgt[SMC_LINKS_PER_LGR_MAX];
					/* virtual buffer */
			struct ib_mr	*mr[SMC_LINKS_PER_LGR_MAX];
					/* memory region: for rmb and
					 * vzalloced sndbuf
					 * incl. rkey provided to peer
					 * and lkey provided to local
					 */
			u32		order;	/* allocation order */

			u8		is_conf_rkey;
					/* confirm_rkey done */
			u8		is_reg_mr[SMC_LINKS_PER_LGR_MAX];
					/* mem region registered */
			u8		is_map_ib[SMC_LINKS_PER_LGR_MAX];
					/* mem region mapped to lnk */
			u8		is_dma_need_sync;
			u8		is_reg_err;
					/* buffer registration err */
			u8		is_vm;
					/* virtually contiguous */
		};
		struct { /* SMC-D */
			unsigned short	sba_idx;
					/* SBA index number */
			u64		token;
					/* DMB token number */
			dma_addr_t	dma_addr;
					/* DMA address */
		};
	};
};

struct smc_rtoken {				/* address/key of remote RMB */
	u64			dma_addr;
	u32			rkey;
};

#define SMC_BUF_MIN_SIZE	16384	/* minimum size of an RMB */
#define SMC_RMBE_SIZES		16	/* number of distinct RMBE sizes */
/* theoretically, the RFC states that largest size would be 512K,
 * i.e. compressed 5 and thus 6 sizes (0..5), despite
 * struct smc_clc_msg_accept_confirm.rmbe_size being a 4 bit value (0..15)
 */
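/* Worked example (editor's illustration, based on the encoding used by
 * smc_uncompress_bufsize() declared below): an RMBE size is transferred as a
 * compressed power-of-two exponent relative to SMC_BUF_MIN_SIZE, i.e.
 * size = SMC_BUF_MIN_SIZE << compressed. A compressed value of 0 therefore
 * means 16K, and compressed 5 yields 16384 << 5 = 512K, the largest size
 * mentioned in the RFC.
 */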

struct smcd_dev;

enum smc_lgr_type {				/* redundancy state of lgr */
	SMC_LGR_NONE,			/* no active links, lgr to be deleted */
	SMC_LGR_SINGLE,			/* 1 active RNIC on each peer */
	SMC_LGR_SYMMETRIC,		/* 2 active RNICs on each peer */
	SMC_LGR_ASYMMETRIC_PEER,	/* local has 2, peer 1 active RNICs */
	SMC_LGR_ASYMMETRIC_LOCAL,	/* local has 1, peer 2 active RNICs */
};

enum smcr_buf_type {		/* types of SMC-R sndbufs and RMBs */
	SMCR_PHYS_CONT_BUFS	= 0,
	SMCR_VIRT_CONT_BUFS	= 1,
	SMCR_MIXED_BUFS		= 2,
};

enum smc_llc_flowtype {
	SMC_LLC_FLOW_NONE	= 0,
	SMC_LLC_FLOW_ADD_LINK	= 2,
	SMC_LLC_FLOW_DEL_LINK	= 4,
	SMC_LLC_FLOW_REQ_ADD_LINK = 5,
	SMC_LLC_FLOW_RKEY	= 6,
};

struct smc_llc_qentry;

struct smc_llc_flow {
	enum smc_llc_flowtype type;
	struct smc_llc_qentry *qentry;
};

struct smc_link_group {
	struct list_head	list;
	struct rb_root		conns_all;	/* connection tree */
	rwlock_t		conns_lock;	/* protects conns_all */
	unsigned int		conns_num;	/* current # of connections */
	unsigned short		vlan_id;	/* vlan id of link group */

	struct list_head	sndbufs[SMC_RMBE_SIZES];/* tx buffers */
	struct rw_semaphore	sndbufs_lock;	/* protects tx buffers */
	struct list_head	rmbs[SMC_RMBE_SIZES];	/* rx buffers */
	struct rw_semaphore	rmbs_lock;	/* protects rx buffers */
	u64			alloc_sndbufs;	/* stats of tx buffers */
	u64			alloc_rmbs;	/* stats of rx buffers */

	u8			id[SMC_LGR_ID_SIZE];	/* unique lgr id */
	struct delayed_work	free_work;	/* delayed freeing of an lgr */
	struct work_struct	terminate_work;	/* abnormal lgr termination */
	struct workqueue_struct	*tx_wq;		/* wq for conn. tx workers */
	u8			sync_err : 1;	/* lgr no longer fits to peer */
	u8			terminating : 1;/* lgr is terminating */
	u8			freeing : 1;	/* lgr is being freed */

	refcount_t		refcnt;		/* lgr reference count */
	bool			is_smcd;	/* SMC-R or SMC-D */
	u8			smc_version;
	u8			negotiated_eid[SMC_MAX_EID_LEN];
	u8			peer_os;	/* peer operating system */
	u8			peer_smc_release;
	u8			peer_hostname[SMC_MAX_HOSTNAME_LEN];
	union {
		struct { /* SMC-R */
			enum smc_lgr_role	role;
						/* client or server */
			struct smc_link		lnk[SMC_LINKS_PER_LGR_MAX];
						/* smc link */
			struct smc_wr_v2_buf	*wr_rx_buf_v2;
						/* WR v2 recv payload buffer */
			struct smc_wr_v2_buf	*wr_tx_buf_v2;
						/* WR v2 send payload buffer */
			char			peer_systemid[SMC_SYSTEMID_LEN];
						/* unique system_id of peer */
			struct smc_rtoken	rtokens[SMC_RMBS_PER_LGR_MAX]
						[SMC_LINKS_PER_LGR_MAX];
						/* remote addr/key pairs */
			DECLARE_BITMAP(rtokens_used_mask, SMC_RMBS_PER_LGR_MAX);
						/* used rtoken elements */
			u8			next_link_id;
			enum smc_lgr_type	type;
						/* redundancy state */
			enum smcr_buf_type	buf_type;
						/* physically or virtually
						 * contiguous buffers
						 */
			u8			pnet_id[SMC_MAX_PNETID_LEN + 1];
						/* pnet id of this lgr */
			struct list_head	llc_event_q;
						/* queue for llc events */
			spinlock_t		llc_event_q_lock;
						/* protects llc_event_q */
			struct rw_semaphore	llc_conf_mutex;
						/* protects lgr reconfig. */
			struct work_struct	llc_add_link_work;
			struct work_struct	llc_del_link_work;
			struct work_struct	llc_event_work;
						/* llc event worker */
			wait_queue_head_t	llc_flow_waiter;
						/* w4 next llc event */
			wait_queue_head_t	llc_msg_waiter;
						/* w4 next llc msg */
			struct smc_llc_flow	llc_flow_lcl;
						/* llc local control field */
			struct smc_llc_flow	llc_flow_rmt;
						/* llc remote control field */
			struct smc_llc_qentry	*delayed_event;
						/* arrived when flow active */
			spinlock_t		llc_flow_lock;
						/* protects llc flow */
			int			llc_testlink_time;
						/* link keep alive time */
			u32			llc_termination_rsn;
						/* rsn code for termination */
			u8			nexthop_mac[ETH_ALEN];
			u8			uses_gateway;
			__be32			saddr;
			struct net		*net;
						/* net namespace */
			u8			max_conns;
						/* max conn can be assigned to lgr */
			u8			max_links;
						/* max links can be added in lgr */
		};
		struct { /* SMC-D */
			struct smcd_gid		peer_gid;
						/* Peer GID (remote) */
			struct smcd_dev		*smcd;
						/* ISM device for VLAN reg. */
			u8			peer_shutdown : 1;
						/* peer triggered shutdown */
		};
	};
};

struct smc_clc_msg_local;

#define GID_LIST_SIZE	2

struct smc_gidlist {
	u8			len;
	u8			list[GID_LIST_SIZE][SMC_GID_SIZE];
};

struct smc_init_info_smcrv2 {
	/* Input fields */
	__be32			saddr;
	struct sock		*clc_sk;
	__be32			daddr;

	/* Output fields when saddr is set */
	struct smc_ib_device	*ib_dev_v2;
	u8			ib_port_v2;
	u8			ib_gid_v2[SMC_GID_SIZE];

	/* Additional output fields when clc_sk and daddr are set as well */
	u8			uses_gateway;
	u8			nexthop_mac[ETH_ALEN];

	struct smc_gidlist	gidlist;
};

#define SMC_MAX_V2_ISM_DEVS	SMCD_CLC_MAX_V2_GID_ENTRIES
	/* max # of proposed non-native ISM devices,
	 * which can't exceed the max # of CHID-GID
	 * entries in CLC proposal SMC-Dv2 extension.
	 */
struct smc_init_info {
	u8			is_smcd;
	u8			smc_type_v1;
	u8			smc_type_v2;
	u8			release_nr;
	u8			max_conns;
	u8			max_links;
	u8			first_contact_peer;
	u8			first_contact_local;
	u16			feature_mask;
	unsigned short		vlan_id;
	u32			rc;
	u8			negotiated_eid[SMC_MAX_EID_LEN];
	/* SMC-R */
	u8			smcr_version;
	u8			check_smcrv2;
	u8			peer_gid[SMC_GID_SIZE];
	u8			peer_mac[ETH_ALEN];
	u8			peer_systemid[SMC_SYSTEMID_LEN];
	struct smc_ib_device	*ib_dev;
	u8			ib_gid[SMC_GID_SIZE];
	u8			ib_port;
	u32			ib_clcqpn;
	struct smc_init_info_smcrv2 smcrv2;
	/* SMC-D */
	struct smcd_gid		ism_peer_gid[SMC_MAX_V2_ISM_DEVS + 1];
	struct smcd_dev		*ism_dev[SMC_MAX_V2_ISM_DEVS + 1];
	u16			ism_chid[SMC_MAX_V2_ISM_DEVS + 1];
	u8			ism_offered_cnt; /* # of ISM devices offered */
	u8			ism_selected;	/* index of selected ISM dev */
	u8			smcd_version;
};

/* Find the connection associated with the given alert token in the link group.
 * To use rbtrees we have to implement our own search core.
 * Requires @conns_lock
 * @token	alert token to search for
 * @lgr		link group to search in
 * Returns connection associated with token if found, NULL otherwise.
 */
static inline struct smc_connection *smc_lgr_find_conn(
	u32 token, struct smc_link_group *lgr)
{
	struct smc_connection *res = NULL;
	struct rb_node *node;

	node = lgr->conns_all.rb_node;
	while (node) {
		struct smc_connection *cur = rb_entry(node,
					struct smc_connection, alert_node);

		if (cur->alert_token_local > token) {
			node = node->rb_left;
		} else {
			if (cur->alert_token_local < token) {
				node = node->rb_right;
			} else {
				res = cur;
				break;
			}
		}
	}

	return res;
}
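/* Usage sketch (editor's illustration; the tree walk above is only safe while
 * conns_lock is held, and the variable names here are hypothetical):
 *
 *	read_lock_bh(&lgr->conns_lock);
 *	conn = smc_lgr_find_conn(token, lgr);
 *	read_unlock_bh(&lgr->conns_lock);
 */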

static inline bool smc_conn_lgr_valid(struct smc_connection *conn)
{
	return conn->lgr && conn->alert_token_local;
}

/*
 * Returns true if the specified link is usable.
 *
 * usable means the link is ready to receive RDMA messages, map memory
 * on the link, etc. This doesn't ensure we are able to send RDMA messages
 * on this link; if sending RDMA messages is needed, use smc_link_sendable().
 */
static inline bool smc_link_usable(struct smc_link *lnk)
{
	if (lnk->state == SMC_LNK_UNUSED || lnk->state == SMC_LNK_INACTIVE)
		return false;
	return true;
}

/*
 * Returns true if the specified link is ready to receive AND send RDMA
 * messages.
 *
 * For the client side in first contact, the underlying QP may still be in
 * RESET or RTR while the link state is already ACTIVATING, so the check in
 * smc_link_usable() is not strong enough. Wherever a CDC or LLC message must
 * be sent, use smc_link_sendable(); otherwise smc_link_usable() is sufficient.
 */
static inline bool smc_link_sendable(struct smc_link *lnk)
{
	return smc_link_usable(lnk) &&
		lnk->qp_attr.cur_qp_state == IB_QPS_RTS;
}
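/* Illustrative example (editor's sketch, hypothetical caller): the stronger
 * check is required before posting a send work request, while buffer mapping
 * or registration may proceed on a merely usable link:
 *
 *	if (!smc_link_sendable(lnk))
 *		return -ENOLINK;	// QP not in RTS yet, cannot post sends
 */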

static inline bool smc_link_active(struct smc_link *lnk)
{
	return lnk->state == SMC_LNK_ACTIVE;
}

static inline void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
{
	sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
		be16_to_cpu(((__be16 *)gid_raw)[0]),
		be16_to_cpu(((__be16 *)gid_raw)[1]),
		be16_to_cpu(((__be16 *)gid_raw)[2]),
		be16_to_cpu(((__be16 *)gid_raw)[3]),
		be16_to_cpu(((__be16 *)gid_raw)[4]),
		be16_to_cpu(((__be16 *)gid_raw)[5]),
		be16_to_cpu(((__be16 *)gid_raw)[6]),
		be16_to_cpu(((__be16 *)gid_raw)[7]));
}
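/* Editor's note (illustration): the formatted GID is 8 groups of 4 hex digits
 * separated by colons plus a terminating NUL, so @buf must hold at least
 * 40 bytes, e.g. "fe80:0000:0000:0000:9a03:9bff:fe0d:2a57" for a link-local
 * RoCE GID (the example value is made up).
 */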

struct smc_pci_dev {
	__u32		pci_fid;
	__u16		pci_pchid;
	__u16		pci_vendor;
	__u16		pci_device;
	__u8		pci_id[SMC_PCI_ID_STR_LEN];
};

static inline void smc_set_pci_values(struct pci_dev *pci_dev,
				      struct smc_pci_dev *smc_dev)
{
	smc_dev->pci_vendor = pci_dev->vendor;
	smc_dev->pci_device = pci_dev->device;
	snprintf(smc_dev->pci_id, sizeof(smc_dev->pci_id), "%s",
		 pci_name(pci_dev));
#if IS_ENABLED(CONFIG_S390)
	{ /* Set s390 specific PCI information */
		struct zpci_dev *zdev;

		zdev = to_zpci(pci_dev);
		smc_dev->pci_fid = zdev->fid;
		smc_dev->pci_pchid = zdev->pchid;
	}
#endif
}

struct smc_sock;
struct smc_clc_msg_accept_confirm;

void smc_lgr_cleanup_early(struct smc_link_group *lgr);
void smc_lgr_terminate_sched(struct smc_link_group *lgr);
void smc_lgr_hold(struct smc_link_group *lgr);
void smc_lgr_put(struct smc_link_group *lgr);
void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport);
void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport);
void smc_smcd_terminate(struct smcd_dev *dev, struct smcd_gid *peer_gid,
			unsigned short vlan);
void smc_smcd_terminate_all(struct smcd_dev *dev);
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev);
int smc_buf_create(struct smc_sock *smc, bool is_smcd);
int smcd_buf_attach(struct smc_sock *smc);
int smc_uncompress_bufsize(u8 compressed);
int smc_rmb_rtoken_handling(struct smc_connection *conn, struct smc_link *link,
			    struct smc_clc_msg_accept_confirm *clc);
int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey);
int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey);
void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
		    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey);
void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
		     __be64 nw_vaddr, __be32 nw_rkey);
void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn);
void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn);
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini);

void smc_conn_free(struct smc_connection *conn);
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini);
int smc_core_init(void);
void smc_core_exit(void);

int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
		   u8 link_idx, struct smc_init_info *ini);
void smcr_link_clear(struct smc_link *lnk, bool log);
void smcr_link_hold(struct smc_link *lnk);
void smcr_link_put(struct smc_link *lnk);
void smc_switch_link_and_count(struct smc_connection *conn,
			       struct smc_link *to_lnk);
int smcr_buf_map_lgr(struct smc_link *lnk);
int smcr_buf_reg_lgr(struct smc_link *lnk);
void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type);
void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
			    enum smc_lgr_type new_type, int asym_lnk_idx);
int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *rmb_desc);
struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
				  struct smc_link *from_lnk, bool is_dev_err);
void smcr_link_down_cond(struct smc_link *lnk);
void smcr_link_down_cond_sched(struct smc_link *lnk);
int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb);
int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb);
int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb);
int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb);

static inline struct smc_link_group *smc_get_lgr(struct smc_link *link)
{
	return link->lgr;
}
#endif