// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * CLC (connection layer control) handshake over initial TCP socket to
 * prepare for RDMA traffic
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/in.h>
#include <linux/inetdevice.h>
#include <linux/if_ether.h>
#include <linux/sched/signal.h>
#include <linux/utsname.h>
#include <linux/ctype.h>

#include <net/addrconf.h>
#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_clc.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_netlink.h"

#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
#define SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 78
#define SMCR_CLC_ACCEPT_CONFIRM_LEN_V2 108
#define SMC_CLC_RECV_BUF_LEN 100

/* eye catcher "SMCR" EBCDIC for CLC messages */
static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
/* eye catcher "SMCD" EBCDIC for CLC messages */
static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};

static u8 smc_hostname[SMC_MAX_HOSTNAME_LEN];

struct smc_clc_eid_table {
	rwlock_t lock;
	struct list_head list;
	u8 ueid_cnt;
	u8 seid_enabled;
};

static struct smc_clc_eid_table smc_clc_eid_table;

struct smc_clc_eid_entry {
	struct list_head list;
	u8 eid[SMC_MAX_EID_LEN];
};

/* The size of a user EID is 32 characters.
 * Valid characters should be (single-byte character set) A-Z, 0-9, '.' and '-'.
 * Blanks should only be used to pad to the expected size.
 * First character must be alphanumeric.
 */
static bool smc_clc_ueid_valid(char *ueid)
{
	char *end = ueid + SMC_MAX_EID_LEN;

	while (--end >= ueid && isspace(*end))
		;
	if (end < ueid)
		return false;
	if (!isalnum(*ueid) || islower(*ueid))
		return false;
	while (ueid <= end) {
		if ((!isalnum(*ueid) || islower(*ueid)) && *ueid != '.' &&
		    *ueid != '-')
			return false;
		ueid++;
	}
	return true;
}

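/* add a user-defined EID to the EID table; the EID must be valid and not yet
 * present, and the table is limited to SMC_MAX_UEID entries
 */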
static int smc_clc_ueid_add(char *ueid)
{
	struct smc_clc_eid_entry *new_ueid, *tmp_ueid;
	int rc;

	if (!smc_clc_ueid_valid(ueid))
		return -EINVAL;

	/* add a new ueid entry to the ueid table if there isn't one */
	new_ueid = kzalloc(sizeof(*new_ueid), GFP_KERNEL);
	if (!new_ueid)
		return -ENOMEM;
	memcpy(new_ueid->eid, ueid, SMC_MAX_EID_LEN);

	write_lock(&smc_clc_eid_table.lock);
	if (smc_clc_eid_table.ueid_cnt >= SMC_MAX_UEID) {
		rc = -ERANGE;
		goto err_out;
	}
	list_for_each_entry(tmp_ueid, &smc_clc_eid_table.list, list) {
		if (!memcmp(tmp_ueid->eid, ueid, SMC_MAX_EID_LEN)) {
			rc = -EEXIST;
			goto err_out;
		}
	}
	list_add_tail(&new_ueid->list, &smc_clc_eid_table.list);
	smc_clc_eid_table.ueid_cnt++;
	write_unlock(&smc_clc_eid_table.lock);
	return 0;

err_out:
	write_unlock(&smc_clc_eid_table.lock);
	kfree(new_ueid);
	return rc;
}

int smc_clc_ueid_count(void)
{
	int count;

	read_lock(&smc_clc_eid_table.lock);
	count = smc_clc_eid_table.ueid_cnt;
	read_unlock(&smc_clc_eid_table.lock);

	return count;
}

int smc_nl_add_ueid(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *nla_ueid = info->attrs[SMC_NLA_EID_TABLE_ENTRY];
	char *ueid;

	if (!nla_ueid || nla_len(nla_ueid) != SMC_MAX_EID_LEN + 1)
		return -EINVAL;
	ueid = (char *)nla_data(nla_ueid);

	return smc_clc_ueid_add(ueid);
}

/* remove one or all ueid entries from the table */
static int smc_clc_ueid_remove(char *ueid)
{
	struct smc_clc_eid_entry *lst_ueid, *tmp_ueid;
	int rc = -ENOENT;

	/* remove table entry */
	write_lock(&smc_clc_eid_table.lock);
	list_for_each_entry_safe(lst_ueid, tmp_ueid, &smc_clc_eid_table.list,
				 list) {
		if (!ueid || !memcmp(lst_ueid->eid, ueid, SMC_MAX_EID_LEN)) {
			list_del(&lst_ueid->list);
			smc_clc_eid_table.ueid_cnt--;
			kfree(lst_ueid);
			rc = 0;
		}
	}
	if (!rc && !smc_clc_eid_table.ueid_cnt) {
		smc_clc_eid_table.seid_enabled = 1;
		rc = -EAGAIN; /* indicate success and enabling of seid */
	}
	write_unlock(&smc_clc_eid_table.lock);
	return rc;
}

int smc_nl_remove_ueid(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *nla_ueid = info->attrs[SMC_NLA_EID_TABLE_ENTRY];
	char *ueid;

	if (!nla_ueid || nla_len(nla_ueid) != SMC_MAX_EID_LEN + 1)
		return -EINVAL;
	ueid = (char *)nla_data(nla_ueid);

	return smc_clc_ueid_remove(ueid);
}

int smc_nl_flush_ueid(struct sk_buff *skb, struct genl_info *info)
{
	smc_clc_ueid_remove(NULL);
	return 0;
}

static int smc_nl_ueid_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq,
				u32 flags, char *ueid)
{
	char ueid_str[SMC_MAX_EID_LEN + 1];
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &smc_gen_nl_family,
			  flags, SMC_NETLINK_DUMP_UEID);
	if (!hdr)
		return -ENOMEM;
	snprintf(ueid_str, sizeof(ueid_str), "%s", ueid);
	if (nla_put_string(skb, SMC_NLA_EID_TABLE_ENTRY, ueid_str)) {
		genlmsg_cancel(skb, hdr);
		return -EMSGSIZE;
	}
	genlmsg_end(skb, hdr);
	return 0;
}

static int _smc_nl_ueid_dump(struct sk_buff *skb, u32 portid, u32 seq,
			     int start_idx)
{
	struct smc_clc_eid_entry *lst_ueid;
	int idx = 0;

	read_lock(&smc_clc_eid_table.lock);
	list_for_each_entry(lst_ueid, &smc_clc_eid_table.list, list) {
		if (idx++ < start_idx)
			continue;
		if (smc_nl_ueid_dumpinfo(skb, portid, seq, NLM_F_MULTI,
					 lst_ueid->eid)) {
			--idx;
			break;
		}
	}
	read_unlock(&smc_clc_eid_table.lock);
	return idx;
}

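/* netlink handler: dump the user EID table, continuing at the position stored
 * in the netlink dump context
 */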
int smc_nl_dump_ueid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	int idx;

	idx = _smc_nl_ueid_dump(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, cb_ctx->pos[0]);

	cb_ctx->pos[0] = idx;
	return skb->len;
}

int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	char seid_str[SMC_MAX_EID_LEN + 1];
	u8 seid_enabled;
	void *hdr;
	u8 *seid;

	if (cb_ctx->pos[0])
		return skb->len;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_DUMP_SEID);
	if (!hdr)
		return -ENOMEM;
	if (!smc_ism_is_v2_capable())
		goto end;

	smc_ism_get_system_eid(&seid);
	snprintf(seid_str, sizeof(seid_str), "%s", seid);
	if (nla_put_string(skb, SMC_NLA_SEID_ENTRY, seid_str))
		goto err;
	read_lock(&smc_clc_eid_table.lock);
	seid_enabled = smc_clc_eid_table.seid_enabled;
	read_unlock(&smc_clc_eid_table.lock);
	if (nla_put_u8(skb, SMC_NLA_SEID_ENABLED, seid_enabled))
		goto err;
end:
	genlmsg_end(skb, hdr);
	cb_ctx->pos[0]++;
	return skb->len;
err:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

int smc_nl_enable_seid(struct sk_buff *skb, struct genl_info *info)
{
	write_lock(&smc_clc_eid_table.lock);
	smc_clc_eid_table.seid_enabled = 1;
	write_unlock(&smc_clc_eid_table.lock);
	return 0;
}

int smc_nl_disable_seid(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;

	write_lock(&smc_clc_eid_table.lock);
	if (!smc_clc_eid_table.ueid_cnt)
		rc = -ENOENT;
	else
		smc_clc_eid_table.seid_enabled = 0;
	write_unlock(&smc_clc_eid_table.lock);
	return rc;
}

static bool _smc_clc_match_ueid(u8 *peer_ueid)
{
	struct smc_clc_eid_entry *tmp_ueid;

	list_for_each_entry(tmp_ueid, &smc_clc_eid_table.list, list) {
		if (!memcmp(tmp_ueid->eid, peer_ueid, SMC_MAX_EID_LEN))
			return true;
	}
	return false;
}

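/* determine the EID to use for this connection: prefer the system EID when it
 * is enabled and identical on both peers, otherwise take the first user EID
 * proposed by the peer that is also present in the local table
 */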
bool smc_clc_match_eid(u8 *negotiated_eid,
		       struct smc_clc_v2_extension *smc_v2_ext,
		       u8 *peer_eid, u8 *local_eid)
{
	bool match = false;
	int i;

	negotiated_eid[0] = 0;
	read_lock(&smc_clc_eid_table.lock);
	if (peer_eid && local_eid &&
	    smc_clc_eid_table.seid_enabled &&
	    smc_v2_ext->hdr.flag.seid &&
	    !memcmp(peer_eid, local_eid, SMC_MAX_EID_LEN)) {
		memcpy(negotiated_eid, peer_eid, SMC_MAX_EID_LEN);
		match = true;
		goto out;
	}

	for (i = 0; i < smc_v2_ext->hdr.eid_cnt; i++) {
		if (_smc_clc_match_ueid(smc_v2_ext->user_eids[i])) {
			memcpy(negotiated_eid, smc_v2_ext->user_eids[i],
			       SMC_MAX_EID_LEN);
			match = true;
			goto out;
		}
	}
out:
	read_unlock(&smc_clc_eid_table.lock);
	return match;
}

/* check arriving CLC proposal */
static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
	struct smc_clc_msg_hdr *hdr = &pclc->hdr;
	struct smc_clc_v2_extension *v2_ext;

	v2_ext = smc_get_clc_v2_ext(pclc);
	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (hdr->version == SMC_V1) {
		if (hdr->typev1 == SMC_TYPE_N)
			return false;
		if (ntohs(hdr->length) !=
			sizeof(*pclc) + ntohs(pclc->iparea_offset) +
			sizeof(*pclc_prfx) +
			pclc_prfx->ipv6_prefixes_cnt *
				sizeof(struct smc_clc_ipv6_prefix) +
			sizeof(struct smc_clc_msg_trail))
			return false;
	} else {
		if (ntohs(hdr->length) !=
			sizeof(*pclc) +
			sizeof(struct smc_clc_msg_smcd) +
			(hdr->typev1 != SMC_TYPE_N ?
				sizeof(*pclc_prfx) +
				pclc_prfx->ipv6_prefixes_cnt *
					sizeof(struct smc_clc_ipv6_prefix) : 0) +
			(hdr->typev2 != SMC_TYPE_N ?
				sizeof(*v2_ext) +
				v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN : 0) +
			(smcd_indicated(hdr->typev2) ?
				sizeof(*smcd_v2_ext) + v2_ext->hdr.ism_gid_cnt *
					sizeof(struct smc_clc_smcd_gid_chid) :
				0) +
			sizeof(struct smc_clc_msg_trail))
			return false;
	}
	return true;
}

/* check arriving CLC accept or confirm */
static bool
smc_clc_msg_acc_conf_valid(struct smc_clc_msg_accept_confirm_v2 *clc_v2)
{
	struct smc_clc_msg_hdr *hdr = &clc_v2->hdr;

	if (hdr->typev1 != SMC_TYPE_R && hdr->typev1 != SMC_TYPE_D)
		return false;
	if (hdr->version == SMC_V1) {
		if ((hdr->typev1 == SMC_TYPE_R &&
		     ntohs(hdr->length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) ||
		    (hdr->typev1 == SMC_TYPE_D &&
		     ntohs(hdr->length) != SMCD_CLC_ACCEPT_CONFIRM_LEN))
			return false;
	} else {
		if (hdr->typev1 == SMC_TYPE_D &&
		    ntohs(hdr->length) != SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 &&
		    (ntohs(hdr->length) != SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 +
				sizeof(struct smc_clc_first_contact_ext)))
			return false;
		if (hdr->typev1 == SMC_TYPE_R &&
		    ntohs(hdr->length) < SMCR_CLC_ACCEPT_CONFIRM_LEN_V2)
			return false;
	}
	return true;
}

/* check arriving CLC decline */
static bool
smc_clc_msg_decl_valid(struct smc_clc_msg_decline *dclc)
{
	struct smc_clc_msg_hdr *hdr = &dclc->hdr;

	if (hdr->typev1 != SMC_TYPE_R && hdr->typev1 != SMC_TYPE_D)
		return false;
	if (hdr->version == SMC_V1) {
		if (ntohs(hdr->length) != sizeof(struct smc_clc_msg_decline))
			return false;
	} else {
		if (ntohs(hdr->length) != sizeof(struct smc_clc_msg_decline_v2))
			return false;
	}
	return true;
}

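/* fill the first contact extension with OS type, release number and local
 * hostname, and add its size to the CLC message length
 */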
static void smc_clc_fill_fce(struct smc_clc_first_contact_ext *fce, int *len)
{
	memset(fce, 0, sizeof(*fce));
	fce->os_type = SMC_CLC_OS_LINUX;
	fce->release = SMC_RELEASE;
	memcpy(fce->hostname, smc_hostname, sizeof(smc_hostname));
	(*len) += sizeof(*fce);
}

/* check if received message has a correct header length and contains valid
 * heading and trailing eyecatchers
 */
static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl)
{
	struct smc_clc_msg_accept_confirm_v2 *clc_v2;
	struct smc_clc_msg_proposal *pclc;
	struct smc_clc_msg_decline *dclc;
	struct smc_clc_msg_trail *trl;

	if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
	    memcmp(clcm->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
		return false;
	switch (clcm->type) {
	case SMC_CLC_PROPOSAL:
		pclc = (struct smc_clc_msg_proposal *)clcm;
		if (!smc_clc_msg_prop_valid(pclc))
			return false;
		trl = (struct smc_clc_msg_trail *)
			((u8 *)pclc + ntohs(pclc->hdr.length) - sizeof(*trl));
		break;
	case SMC_CLC_ACCEPT:
	case SMC_CLC_CONFIRM:
		clc_v2 = (struct smc_clc_msg_accept_confirm_v2 *)clcm;
		if (!smc_clc_msg_acc_conf_valid(clc_v2))
			return false;
		trl = (struct smc_clc_msg_trail *)
			((u8 *)clc_v2 + ntohs(clc_v2->hdr.length) -
							sizeof(*trl));
		break;
	case SMC_CLC_DECLINE:
		dclc = (struct smc_clc_msg_decline *)clcm;
		if (!smc_clc_msg_decl_valid(dclc))
			return false;
		check_trl = false;
		break;
	default:
		return false;
	}
	if (check_trl &&
	    memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
	    memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
		return false;
	return true;
}

/* find ipv4 addr on device and get the prefix len, fill CLC proposal msg */
static int smc_clc_prfx_set4_rcu(struct dst_entry *dst, __be32 ipv4,
				 struct smc_clc_msg_proposal_prefix *prop)
{
	struct in_device *in_dev = __in_dev_get_rcu(dst->dev);
	const struct in_ifaddr *ifa;

	if (!in_dev)
		return -ENODEV;

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (!inet_ifa_match(ipv4, ifa))
			continue;
		prop->prefix_len = inet_mask_len(ifa->ifa_mask);
		prop->outgoing_subnet = ifa->ifa_address & ifa->ifa_mask;
		/* prop->ipv6_prefixes_cnt = 0; already done by memset before */
		return 0;
	}
	return -ENOENT;
}

/* fill CLC proposal msg with ipv6 prefixes from device */
static int smc_clc_prfx_set6_rcu(struct dst_entry *dst,
				 struct smc_clc_msg_proposal_prefix *prop,
				 struct smc_clc_ipv6_prefix *ipv6_prfx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev = __in6_dev_get(dst->dev);
	struct inet6_ifaddr *ifa;
	int cnt = 0;

	if (!in6_dev)
		return -ENODEV;
	/* use a maximum of 8 IPv6 prefixes from device */
	list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
		if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
			continue;
		ipv6_addr_prefix(&ipv6_prfx[cnt].prefix,
				 &ifa->addr, ifa->prefix_len);
		ipv6_prfx[cnt].prefix_len = ifa->prefix_len;
		cnt++;
		if (cnt == SMC_CLC_MAX_V6_PREFIX)
			break;
	}
	prop->ipv6_prefixes_cnt = cnt;
	if (cnt)
		return 0;
#endif
	return -ENOENT;
}

/* retrieve and set prefixes in CLC proposal msg */
static int smc_clc_prfx_set(struct socket *clcsock,
			    struct smc_clc_msg_proposal_prefix *prop,
			    struct smc_clc_ipv6_prefix *ipv6_prfx)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct sockaddr_storage addrs;
	struct sockaddr_in6 *addr6;
	struct sockaddr_in *addr;
	int rc = -ENOENT;

	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}
	/* get address to which the internal TCP socket is bound */
	if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)
		goto out_rel;
	/* analyze IP specific data of net_device belonging to TCP socket */
	addr6 = (struct sockaddr_in6 *)&addrs;
	rcu_read_lock();
	if (addrs.ss_family == PF_INET) {
		/* IPv4 */
		addr = (struct sockaddr_in *)&addrs;
		rc = smc_clc_prfx_set4_rcu(dst, addr->sin_addr.s_addr, prop);
	} else if (ipv6_addr_v4mapped(&addr6->sin6_addr)) {
		/* mapped IPv4 address - peer is IPv4 only */
		rc = smc_clc_prfx_set4_rcu(dst, addr6->sin6_addr.s6_addr32[3],
					   prop);
	} else {
		/* IPv6 */
		rc = smc_clc_prfx_set6_rcu(dst, prop, ipv6_prfx);
	}
	rcu_read_unlock();
out_rel:
	dst_release(dst);
out:
	return rc;
}

/* match ipv4 addrs of dev against addr in CLC proposal */
static int smc_clc_prfx_match4_rcu(struct net_device *dev,
				   struct smc_clc_msg_proposal_prefix *prop)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	const struct in_ifaddr *ifa;

	if (!in_dev)
		return -ENODEV;
	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (prop->prefix_len == inet_mask_len(ifa->ifa_mask) &&
		    inet_ifa_match(prop->outgoing_subnet, ifa))
			return 0;
	}

	return -ENOENT;
}

/* match ipv6 addrs of dev against addrs in CLC proposal */
static int smc_clc_prfx_match6_rcu(struct net_device *dev,
				   struct smc_clc_msg_proposal_prefix *prop)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev = __in6_dev_get(dev);
	struct smc_clc_ipv6_prefix *ipv6_prfx;
	struct inet6_ifaddr *ifa;
	int i, max;

	if (!in6_dev)
		return -ENODEV;
	/* ipv6 prefix list starts behind smc_clc_msg_proposal_prefix */
	ipv6_prfx = (struct smc_clc_ipv6_prefix *)((u8 *)prop + sizeof(*prop));
	max = min_t(u8, prop->ipv6_prefixes_cnt, SMC_CLC_MAX_V6_PREFIX);
	list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
		if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
			continue;
		for (i = 0; i < max; i++) {
			if (ifa->prefix_len == ipv6_prfx[i].prefix_len &&
			    ipv6_prefix_equal(&ifa->addr, &ipv6_prfx[i].prefix,
					      ifa->prefix_len))
				return 0;
		}
	}
#endif
	return -ENOENT;
}

/* check if proposed prefixes match one of our device prefixes */
int smc_clc_prfx_match(struct socket *clcsock,
		       struct smc_clc_msg_proposal_prefix *prop)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	int rc;

	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}
	rcu_read_lock();
	if (!prop->ipv6_prefixes_cnt)
		rc = smc_clc_prfx_match4_rcu(dst->dev, prop);
	else
		rc = smc_clc_prfx_match6_rcu(dst->dev, prop);
	rcu_read_unlock();
out_rel:
	dst_release(dst);
out:
	return rc;
}

/* Wait for data on the tcp-socket, analyze received data
 * Returns:
 * 0 if success and it was not a decline that we received.
 * SMC_CLC_DECL_PEERDECL if decline received for fallback w/o another decl send.
 * clcsock error, -EINTR, -ECONNRESET, -EPROTO otherwise.
 */
int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
		     u8 expected_type, unsigned long timeout)
{
	long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
	struct sock *clc_sk = smc->clcsock->sk;
	struct smc_clc_msg_hdr *clcm = buf;
	struct msghdr msg = {NULL, 0};
	int reason_code = 0;
	struct kvec vec = {buf, buflen};
	int len, datlen, recvlen;
	bool check_trl = true;
	int krflags;

	/* peek the first few bytes to determine length of data to receive
	 * so we don't consume any subsequent CLC message or payload data
	 * in the TCP byte stream
	 */
	/*
	 * Caller must make sure that buflen is no less than
	 * sizeof(struct smc_clc_msg_hdr)
	 */
	krflags = MSG_PEEK | MSG_WAITALL;
	clc_sk->sk_rcvtimeo = timeout;
	iov_iter_kvec(&msg.msg_iter, READ, &vec, 1,
		      sizeof(struct smc_clc_msg_hdr));
	len = sock_recvmsg(smc->clcsock, &msg, krflags);
	if (signal_pending(current)) {
		reason_code = -EINTR;
		clc_sk->sk_err = EINTR;
		smc->sk.sk_err = EINTR;
		goto out;
	}
	if (clc_sk->sk_err) {
		reason_code = -clc_sk->sk_err;
		if (clc_sk->sk_err == EAGAIN &&
		    expected_type == SMC_CLC_DECLINE)
			clc_sk->sk_err = 0; /* reset for fallback usage */
		else
			smc->sk.sk_err = clc_sk->sk_err;
		goto out;
	}
	if (!len) { /* peer has performed orderly shutdown */
		smc->sk.sk_err = ECONNRESET;
		reason_code = -ECONNRESET;
		goto out;
	}
	if (len < 0) {
		if (len != -EAGAIN || expected_type != SMC_CLC_DECLINE)
			smc->sk.sk_err = -len;
		reason_code = len;
		goto out;
	}
	datlen = ntohs(clcm->length);
	if ((len < sizeof(struct smc_clc_msg_hdr)) ||
	    (clcm->version < SMC_V1) ||
	    ((clcm->type != SMC_CLC_DECLINE) &&
	     (clcm->type != expected_type))) {
		smc->sk.sk_err = EPROTO;
		reason_code = -EPROTO;
		goto out;
	}

	/* receive the complete CLC message */
	memset(&msg, 0, sizeof(struct msghdr));
	if (datlen > buflen) {
		check_trl = false;
		recvlen = buflen;
	} else {
		recvlen = datlen;
	}
	iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
	krflags = MSG_WAITALL;
	len = sock_recvmsg(smc->clcsock, &msg, krflags);
	if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) {
		smc->sk.sk_err = EPROTO;
		reason_code = -EPROTO;
		goto out;
	}
	datlen -= len;
	while (datlen) {
		u8 tmp[SMC_CLC_RECV_BUF_LEN];

		vec.iov_base = &tmp;
		vec.iov_len = SMC_CLC_RECV_BUF_LEN;
		/* receive remaining proposal message */
		recvlen = datlen > SMC_CLC_RECV_BUF_LEN ?
				SMC_CLC_RECV_BUF_LEN : datlen;
		iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
		len = sock_recvmsg(smc->clcsock, &msg, krflags);
		datlen -= len;
	}
	if (clcm->type == SMC_CLC_DECLINE) {
		struct smc_clc_msg_decline *dclc;

		dclc = (struct smc_clc_msg_decline *)clcm;
		reason_code = SMC_CLC_DECL_PEERDECL;
		smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
		if (((struct smc_clc_msg_decline *)buf)->hdr.typev2 &
						SMC_FIRST_CONTACT_MASK) {
			smc->conn.lgr->sync_err = 1;
			smc_lgr_terminate_sched(smc->conn.lgr);
		}
	}

out:
	clc_sk->sk_rcvtimeo = rcvtimeo;
	return reason_code;
}

/* send CLC DECLINE message across internal TCP socket */
int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, u8 version)
{
	struct smc_clc_msg_decline *dclc_v1;
	struct smc_clc_msg_decline_v2 dclc;
	struct msghdr msg;
	int len, send_len;
	struct kvec vec;

	dclc_v1 = (struct smc_clc_msg_decline *)&dclc;
	memset(&dclc, 0, sizeof(dclc));
	memcpy(dclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
	dclc.hdr.type = SMC_CLC_DECLINE;
	dclc.hdr.version = version;
	dclc.os_type = version == SMC_V1 ? 0 : SMC_CLC_OS_LINUX;
	dclc.hdr.typev2 = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ?
						SMC_FIRST_CONTACT_MASK : 0;
	if ((!smc->conn.lgr || !smc->conn.lgr->is_smcd) &&
	    smc_ib_is_valid_local_systemid())
		memcpy(dclc.id_for_peer, local_systemid,
		       sizeof(local_systemid));
	dclc.peer_diagnosis = htonl(peer_diag_info);
	if (version == SMC_V1) {
		memcpy(dclc_v1->trl.eyecatcher, SMC_EYECATCHER,
		       sizeof(SMC_EYECATCHER));
		send_len = sizeof(*dclc_v1);
	} else {
		memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER,
		       sizeof(SMC_EYECATCHER));
		send_len = sizeof(dclc);
	}
	dclc.hdr.length = htons(send_len);

	memset(&msg, 0, sizeof(msg));
	vec.iov_base = &dclc;
	vec.iov_len = send_len;
	len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, send_len);
	if (len < 0 || len < send_len)
		len = -EPROTO;
	return len > 0 ? 0 : len;
}

/* send CLC PROPOSAL message across internal TCP socket */
int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct smc_clc_msg_proposal *pclc_base;
	struct smc_clc_smcd_gid_chid *gidchids;
	struct smc_clc_msg_proposal_area *pclc;
	struct smc_clc_ipv6_prefix *ipv6_prfx;
	struct smc_clc_v2_extension *v2_ext;
	struct smc_clc_msg_smcd *pclc_smcd;
	struct smc_clc_msg_trail *trl;
	int len, i, plen, rc;
	int reason_code = 0;
	struct kvec vec[8];
	struct msghdr msg;

	pclc = kzalloc(sizeof(*pclc), GFP_KERNEL);
	if (!pclc)
		return -ENOMEM;

	pclc_base = &pclc->pclc_base;
	pclc_smcd = &pclc->pclc_smcd;
	pclc_prfx = &pclc->pclc_prfx;
	ipv6_prfx = pclc->pclc_prfx_ipv6;
	v2_ext = &pclc->pclc_v2_ext;
	smcd_v2_ext = &pclc->pclc_smcd_v2_ext;
	gidchids = pclc->pclc_gidchids;
	trl = &pclc->pclc_trl;

	pclc_base->hdr.version = SMC_V2;
	pclc_base->hdr.typev1 = ini->smc_type_v1;
	pclc_base->hdr.typev2 = ini->smc_type_v2;
	plen = sizeof(*pclc_base) + sizeof(*pclc_smcd) + sizeof(*trl);

	/* retrieve ip prefixes for CLC proposal msg */
	if (ini->smc_type_v1 != SMC_TYPE_N) {
		rc = smc_clc_prfx_set(smc->clcsock, pclc_prfx, ipv6_prfx);
		if (rc) {
			if (ini->smc_type_v2 == SMC_TYPE_N) {
				kfree(pclc);
				return SMC_CLC_DECL_CNFERR;
			}
			pclc_base->hdr.typev1 = SMC_TYPE_N;
		} else {
			pclc_base->iparea_offset = htons(sizeof(*pclc_smcd));
			plen += sizeof(*pclc_prfx) +
					pclc_prfx->ipv6_prefixes_cnt *
					sizeof(ipv6_prfx[0]);
		}
	}

	/* build SMC Proposal CLC message */
	memcpy(pclc_base->hdr.eyecatcher, SMC_EYECATCHER,
	       sizeof(SMC_EYECATCHER));
	pclc_base->hdr.type = SMC_CLC_PROPOSAL;
	if (smcr_indicated(ini->smc_type_v1)) {
		/* add SMC-R specifics */
		memcpy(pclc_base->lcl.id_for_peer, local_systemid,
		       sizeof(local_systemid));
		memcpy(pclc_base->lcl.gid, ini->ib_gid, SMC_GID_SIZE);
		memcpy(pclc_base->lcl.mac, &ini->ib_dev->mac[ini->ib_port - 1],
		       ETH_ALEN);
	}
	if (smcd_indicated(ini->smc_type_v1)) {
		/* add SMC-D specifics */
		if (ini->ism_dev[0]) {
			pclc_smcd->ism.gid = htonll(ini->ism_dev[0]->local_gid);
			pclc_smcd->ism.chid =
				htons(smc_ism_get_chid(ini->ism_dev[0]));
		}
	}
	if (ini->smc_type_v2 == SMC_TYPE_N) {
		pclc_smcd->v2_ext_offset = 0;
	} else {
		struct smc_clc_eid_entry *ueident;
		u16 v2_ext_offset;

		v2_ext->hdr.flag.release = SMC_RELEASE;
		v2_ext_offset = sizeof(*pclc_smcd) -
			offsetofend(struct smc_clc_msg_smcd, v2_ext_offset);
		if (ini->smc_type_v1 != SMC_TYPE_N)
			v2_ext_offset += sizeof(*pclc_prfx) +
						pclc_prfx->ipv6_prefixes_cnt *
						sizeof(ipv6_prfx[0]);
		pclc_smcd->v2_ext_offset = htons(v2_ext_offset);
		plen += sizeof(*v2_ext);

		read_lock(&smc_clc_eid_table.lock);
		v2_ext->hdr.eid_cnt = smc_clc_eid_table.ueid_cnt;
		plen += smc_clc_eid_table.ueid_cnt * SMC_MAX_EID_LEN;
		i = 0;
		list_for_each_entry(ueident, &smc_clc_eid_table.list, list) {
			memcpy(v2_ext->user_eids[i++], ueident->eid,
			       sizeof(ueident->eid));
		}
		read_unlock(&smc_clc_eid_table.lock);
	}
	if (smcd_indicated(ini->smc_type_v2)) {
		u8 *eid = NULL;

		v2_ext->hdr.flag.seid = smc_clc_eid_table.seid_enabled;
		v2_ext->hdr.ism_gid_cnt = ini->ism_offered_cnt;
		v2_ext->hdr.smcd_v2_ext_offset = htons(sizeof(*v2_ext) -
				offsetofend(struct smc_clnt_opts_area_hdr,
					    smcd_v2_ext_offset) +
				v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN);
		smc_ism_get_system_eid(&eid);
		if (eid && v2_ext->hdr.flag.seid)
			memcpy(smcd_v2_ext->system_eid, eid, SMC_MAX_EID_LEN);
		plen += sizeof(*smcd_v2_ext);
		if (ini->ism_offered_cnt) {
			for (i = 1; i <= ini->ism_offered_cnt; i++) {
				gidchids[i - 1].gid =
					htonll(ini->ism_dev[i]->local_gid);
				gidchids[i - 1].chid =
					htons(smc_ism_get_chid(ini->ism_dev[i]));
			}
			plen += ini->ism_offered_cnt *
				sizeof(struct smc_clc_smcd_gid_chid);
		}
	}
	if (smcr_indicated(ini->smc_type_v2))
		memcpy(v2_ext->roce, ini->smcrv2.ib_gid_v2, SMC_GID_SIZE);

	pclc_base->hdr.length = htons(plen);
	memcpy(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));

	/* send SMC Proposal CLC message */
	memset(&msg, 0, sizeof(msg));
	i = 0;
	vec[i].iov_base = pclc_base;
	vec[i++].iov_len = sizeof(*pclc_base);
	vec[i].iov_base = pclc_smcd;
	vec[i++].iov_len = sizeof(*pclc_smcd);
	if (ini->smc_type_v1 != SMC_TYPE_N) {
		vec[i].iov_base = pclc_prfx;
		vec[i++].iov_len = sizeof(*pclc_prfx);
		if (pclc_prfx->ipv6_prefixes_cnt > 0) {
			vec[i].iov_base = ipv6_prfx;
			vec[i++].iov_len = pclc_prfx->ipv6_prefixes_cnt *
					   sizeof(ipv6_prfx[0]);
		}
	}
	if (ini->smc_type_v2 != SMC_TYPE_N) {
		vec[i].iov_base = v2_ext;
		vec[i++].iov_len = sizeof(*v2_ext) +
				   (v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN);
		if (smcd_indicated(ini->smc_type_v2)) {
			vec[i].iov_base = smcd_v2_ext;
			vec[i++].iov_len = sizeof(*smcd_v2_ext);
			if (ini->ism_offered_cnt) {
				vec[i].iov_base = gidchids;
				vec[i++].iov_len = ini->ism_offered_cnt *
					sizeof(struct smc_clc_smcd_gid_chid);
			}
		}
	}
	vec[i].iov_base = trl;
	vec[i++].iov_len = sizeof(*trl);
	/* due to the few bytes needed for clc-handshake this cannot block */
	len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
	if (len < 0) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
		reason_code = -smc->sk.sk_err;
	} else if (len < ntohs(pclc_base->hdr.length)) {
		reason_code = -ENETUNREACH;
		smc->sk.sk_err = -reason_code;
	}

	kfree(pclc);
	return reason_code;
}

/* build and send CLC CONFIRM / ACCEPT message */
static int smc_clc_send_confirm_accept(struct smc_sock *smc,
				       struct smc_clc_msg_accept_confirm_v2 *clc_v2,
				       int first_contact, u8 version,
				       u8 *eid, struct smc_init_info *ini)
{
	struct smc_connection *conn = &smc->conn;
	struct smc_clc_msg_accept_confirm *clc;
	struct smc_clc_first_contact_ext fce;
	struct smc_clc_fce_gid_ext gle;
	struct smc_clc_msg_trail trl;
	struct kvec vec[5];
	struct msghdr msg;
	int i, len;

	/* send SMC Confirm CLC msg */
	clc = (struct smc_clc_msg_accept_confirm *)clc_v2;
	clc->hdr.version = version;	/* SMC version */
	if (first_contact)
		clc->hdr.typev2 |= SMC_FIRST_CONTACT_MASK;
	if (conn->lgr->is_smcd) {
		/* SMC-D specific settings */
		memcpy(clc->hdr.eyecatcher, SMCD_EYECATCHER,
		       sizeof(SMCD_EYECATCHER));
		clc->hdr.typev1 = SMC_TYPE_D;
		clc->d0.gid = conn->lgr->smcd->local_gid;
		clc->d0.token = conn->rmb_desc->token;
		clc->d0.dmbe_size = conn->rmbe_size_short;
		clc->d0.dmbe_idx = 0;
		memcpy(&clc->d0.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
		if (version == SMC_V1) {
			clc->hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
		} else {
			clc_v2->d1.chid =
				htons(smc_ism_get_chid(conn->lgr->smcd));
			if (eid && eid[0])
				memcpy(clc_v2->d1.eid, eid, SMC_MAX_EID_LEN);
			len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2;
			if (first_contact)
				smc_clc_fill_fce(&fce, &len);
			clc_v2->hdr.length = htons(len);
		}
		memcpy(trl.eyecatcher, SMCD_EYECATCHER,
		       sizeof(SMCD_EYECATCHER));
	} else {
		struct smc_link *link = conn->lnk;

		/* SMC-R specific settings */
		link = conn->lnk;
		memcpy(clc->hdr.eyecatcher, SMC_EYECATCHER,
		       sizeof(SMC_EYECATCHER));
		clc->hdr.typev1 = SMC_TYPE_R;
		clc->hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
		memcpy(clc->r0.lcl.id_for_peer, local_systemid,
		       sizeof(local_systemid));
		memcpy(&clc->r0.lcl.gid, link->gid, SMC_GID_SIZE);
		memcpy(&clc->r0.lcl.mac, &link->smcibdev->mac[link->ibport - 1],
		       ETH_ALEN);
		hton24(clc->r0.qpn, link->roce_qp->qp_num);
		clc->r0.rmb_rkey =
			htonl(conn->rmb_desc->mr_rx[link->link_idx]->rkey);
		clc->r0.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
		clc->r0.rmbe_alert_token = htonl(conn->alert_token_local);
		switch (clc->hdr.type) {
		case SMC_CLC_ACCEPT:
			clc->r0.qp_mtu = link->path_mtu;
			break;
		case SMC_CLC_CONFIRM:
			clc->r0.qp_mtu = min(link->path_mtu, link->peer_mtu);
			break;
		}
		clc->r0.rmbe_size = conn->rmbe_size_short;
		clc->r0.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
				(conn->rmb_desc->sgt[link->link_idx].sgl));
		hton24(clc->r0.psn, link->psn_initial);
		if (version == SMC_V1) {
			clc->hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
		} else {
			if (eid && eid[0])
				memcpy(clc_v2->r1.eid, eid, SMC_MAX_EID_LEN);
			len = SMCR_CLC_ACCEPT_CONFIRM_LEN_V2;
			if (first_contact) {
				smc_clc_fill_fce(&fce, &len);
				fce.v2_direct = !link->lgr->uses_gateway;
				memset(&gle, 0, sizeof(gle));
				if (ini && clc->hdr.type == SMC_CLC_CONFIRM) {
					gle.gid_cnt = ini->smcrv2.gidlist.len;
					len += sizeof(gle);
					len += gle.gid_cnt * sizeof(gle.gid[0]);
				} else {
					len += sizeof(gle.reserved);
				}
			}
			clc_v2->hdr.length = htons(len);
		}
		memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
	}

	memset(&msg, 0, sizeof(msg));
	i = 0;
	vec[i].iov_base = clc_v2;
	if (version > SMC_V1)
		vec[i++].iov_len = (clc->hdr.typev1 == SMC_TYPE_D ?
					SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 :
					SMCR_CLC_ACCEPT_CONFIRM_LEN_V2) -
				   sizeof(trl);
	else
		vec[i++].iov_len = (clc->hdr.typev1 == SMC_TYPE_D ?
					SMCD_CLC_ACCEPT_CONFIRM_LEN :
					SMCR_CLC_ACCEPT_CONFIRM_LEN) -
				   sizeof(trl);
	if (version > SMC_V1 && first_contact) {
		vec[i].iov_base = &fce;
		vec[i++].iov_len = sizeof(fce);
		if (!conn->lgr->is_smcd) {
			if (clc->hdr.type == SMC_CLC_CONFIRM) {
				vec[i].iov_base = &gle;
				vec[i++].iov_len = sizeof(gle);
				vec[i].iov_base = &ini->smcrv2.gidlist.list;
				vec[i++].iov_len = gle.gid_cnt *
						   sizeof(gle.gid[0]);
			} else {
				vec[i].iov_base = &gle.reserved;
				vec[i++].iov_len = sizeof(gle.reserved);
			}
		}
	}
	vec[i].iov_base = &trl;
	vec[i++].iov_len = sizeof(trl);
	/* send all populated kvecs, not just the first one */
	return kernel_sendmsg(smc->clcsock, &msg, vec, i,
			      ntohs(clc->hdr.length));
}

/* send CLC CONFIRM message across internal TCP socket */
int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact,
			 u8 version, u8 *eid, struct smc_init_info *ini)
{
	struct smc_clc_msg_accept_confirm_v2 cclc_v2;
	int reason_code = 0;
	int len;

	/* send SMC Confirm CLC msg */
	memset(&cclc_v2, 0, sizeof(cclc_v2));
	cclc_v2.hdr.type = SMC_CLC_CONFIRM;
	len = smc_clc_send_confirm_accept(smc, &cclc_v2, clnt_first_contact,
					  version, eid, ini);
	if (len < ntohs(cclc_v2.hdr.length)) {
		if (len >= 0) {
			reason_code = -ENETUNREACH;
			smc->sk.sk_err = -reason_code;
		} else {
			smc->sk.sk_err = smc->clcsock->sk->sk_err;
			reason_code = -smc->sk.sk_err;
		}
	}
	return reason_code;
}

/* send CLC ACCEPT message across internal TCP socket */
int smc_clc_send_accept(struct smc_sock *new_smc, bool srv_first_contact,
			u8 version, u8 *negotiated_eid)
{
	struct smc_clc_msg_accept_confirm_v2 aclc_v2;
	int len;

	memset(&aclc_v2, 0, sizeof(aclc_v2));
	aclc_v2.hdr.type = SMC_CLC_ACCEPT;
	len = smc_clc_send_confirm_accept(new_smc, &aclc_v2, srv_first_contact,
					  version, negotiated_eid, NULL);
	if (len < ntohs(aclc_v2.hdr.length))
		len = len >= 0 ? -EPROTO : -new_smc->clcsock->sk->sk_err;

	return len > 0 ? 0 : len;
}

void smc_clc_get_hostname(u8 **host)
{
	*host = &smc_hostname[0];
}

void __init smc_clc_init(void)
{
	struct new_utsname *u;

	memset(smc_hostname, _S, sizeof(smc_hostname)); /* ASCII blanks */
	u = utsname();
	memcpy(smc_hostname, u->nodename,
	       min_t(size_t, strlen(u->nodename), sizeof(smc_hostname)));

	INIT_LIST_HEAD(&smc_clc_eid_table.list);
	rwlock_init(&smc_clc_eid_table.lock);
	smc_clc_eid_table.ueid_cnt = 0;
	smc_clc_eid_table.seid_enabled = 1;
}

void smc_clc_exit(void)
{
	smc_clc_ueid_remove(NULL);
}