1 /* 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 4 * Copyright (c) 2006 Intel Corporation. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include <linux/module.h> 36 #include <linux/init.h> 37 #include <linux/err.h> 38 #include <linux/random.h> 39 #include <linux/spinlock.h> 40 #include <linux/slab.h> 41 #include <linux/dma-mapping.h> 42 #include <linux/kref.h> 43 #include <linux/idr.h> 44 #include <linux/workqueue.h> 45 #include <uapi/linux/if_ether.h> 46 #include <rdma/ib_pack.h> 47 #include <rdma/ib_cache.h> 48 #include <rdma/rdma_netlink.h> 49 #include <net/netlink.h> 50 #include <uapi/rdma/ib_user_sa.h> 51 #include <rdma/ib_marshall.h> 52 #include <rdma/ib_addr.h> 53 #include "sa.h" 54 #include "core_priv.h" 55 56 #define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100 57 #define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000 58 #define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000 59 #define IB_SA_CPI_MAX_RETRY_CNT 3 60 #define IB_SA_CPI_RETRY_WAIT 1000 /*msecs */ 61 static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT; 62 63 struct ib_sa_sm_ah { 64 struct ib_ah *ah; 65 struct kref ref; 66 u16 pkey_index; 67 u8 src_path_mask; 68 }; 69 70 enum rdma_class_port_info_type { 71 RDMA_CLASS_PORT_INFO_IB, 72 RDMA_CLASS_PORT_INFO_OPA 73 }; 74 75 struct rdma_class_port_info { 76 enum rdma_class_port_info_type type; 77 union { 78 struct ib_class_port_info ib; 79 struct opa_class_port_info opa; 80 }; 81 }; 82 83 struct ib_sa_classport_cache { 84 bool valid; 85 int retry_cnt; 86 struct rdma_class_port_info data; 87 }; 88 89 struct ib_sa_port { 90 struct ib_mad_agent *agent; 91 struct ib_sa_sm_ah *sm_ah; 92 struct work_struct update_task; 93 struct ib_sa_classport_cache classport_info; 94 struct delayed_work ib_cpi_work; 95 spinlock_t classport_lock; /* protects class port info set */ 96 spinlock_t ah_lock; 97 u8 port_num; 98 }; 99 100 struct ib_sa_device { 101 int start_port, end_port; 102 struct ib_event_handler event_handler; 103 struct ib_sa_port port[0]; 104 }; 105 106 struct ib_sa_query { 107 void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *); 108 void (*release)(struct ib_sa_query *); 109 struct ib_sa_client *client; 110 struct ib_sa_port *port; 111 struct ib_mad_send_buf *mad_buf; 112 struct ib_sa_sm_ah *sm_ah; 113 int id; 114 u32 flags; 115 struct list_head list; /* Local svc request list */ 116 u32 seq; /* Local svc request sequence number */ 117 unsigned long timeout; /* Local svc timeout */ 118 u8 path_use; /* How will the pathrecord be used */ 119 }; 120 121 #define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001 122 #define IB_SA_CANCEL 0x00000002 123 #define IB_SA_QUERY_OPA 0x00000004 124 125 struct ib_sa_service_query { 126 void (*callback)(int, struct ib_sa_service_rec *, void *); 127 void *context; 128 struct ib_sa_query sa_query; 129 }; 130 131 struct ib_sa_path_query { 132 void (*callback)(int, struct sa_path_rec *, void *); 133 void *context; 134 struct ib_sa_query sa_query; 135 struct sa_path_rec *conv_pr; 136 }; 137 138 struct ib_sa_guidinfo_query { 139 void (*callback)(int, struct ib_sa_guidinfo_rec *, void *); 140 void *context; 141 struct ib_sa_query sa_query; 142 }; 143 144 struct ib_sa_classport_info_query { 145 void (*callback)(void *); 146 void *context; 147 struct ib_sa_query sa_query; 148 }; 149 150 struct ib_sa_mcmember_query { 151 void (*callback)(int, struct ib_sa_mcmember_rec *, void *); 152 void *context; 153 struct ib_sa_query sa_query; 154 }; 155 156 static LIST_HEAD(ib_nl_request_list); 157 static DEFINE_SPINLOCK(ib_nl_request_lock); 158 static atomic_t ib_nl_sa_request_seq; 159 static struct workqueue_struct *ib_nl_wq; 160 static struct delayed_work ib_nl_timed_work; 161 static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = { 162 [LS_NLA_TYPE_PATH_RECORD] = {.type = NLA_BINARY, 163 .len = sizeof(struct ib_path_rec_data)}, 164 [LS_NLA_TYPE_TIMEOUT] = {.type = NLA_U32}, 165 [LS_NLA_TYPE_SERVICE_ID] = {.type = NLA_U64}, 166 [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY, 167 .len = sizeof(struct rdma_nla_ls_gid)}, 168 [LS_NLA_TYPE_SGID] = {.type = NLA_BINARY, 169 .len = sizeof(struct rdma_nla_ls_gid)}, 170 [LS_NLA_TYPE_TCLASS] = {.type = NLA_U8}, 171 [LS_NLA_TYPE_PKEY] = {.type = NLA_U16}, 172 [LS_NLA_TYPE_QOS_CLASS] = {.type = NLA_U16}, 173 }; 174 175 176 static void ib_sa_add_one(struct ib_device *device); 177 static void ib_sa_remove_one(struct ib_device *device, void *client_data); 178 179 static struct ib_client sa_client = { 180 .name = "sa", 181 .add = ib_sa_add_one, 182 .remove = ib_sa_remove_one 183 }; 184 185 static DEFINE_SPINLOCK(idr_lock); 186 static DEFINE_IDR(query_idr); 187 188 static DEFINE_SPINLOCK(tid_lock); 189 static u32 tid; 190 191 #define PATH_REC_FIELD(field) \ 192 .struct_offset_bytes = offsetof(struct sa_path_rec, field), \ 193 .struct_size_bytes = sizeof((struct sa_path_rec *)0)->field, \ 194 .field_name = "sa_path_rec:" #field 195 196 static const struct ib_field path_rec_table[] = { 197 { PATH_REC_FIELD(ib.service_id), 198 .offset_words = 0, 199 .offset_bits = 0, 200 .size_bits = 64 }, 201 { PATH_REC_FIELD(dgid), 202 .offset_words = 2, 203 .offset_bits = 0, 204 .size_bits = 128 }, 205 { PATH_REC_FIELD(sgid), 206 .offset_words = 6, 207 .offset_bits = 0, 208 .size_bits = 128 }, 209 { PATH_REC_FIELD(ib.dlid), 210 .offset_words = 10, 211 .offset_bits = 0, 212 .size_bits = 16 }, 213 { PATH_REC_FIELD(ib.slid), 214 .offset_words = 10, 215 .offset_bits = 16, 216 .size_bits = 16 }, 217 { PATH_REC_FIELD(ib.raw_traffic), 218 .offset_words = 11, 219 .offset_bits = 0, 220 .size_bits = 1 }, 221 { RESERVED, 222 .offset_words = 11, 223 .offset_bits = 1, 224 .size_bits = 3 }, 225 { PATH_REC_FIELD(flow_label), 226 .offset_words = 11, 227 .offset_bits = 4, 228 .size_bits = 20 }, 229 { PATH_REC_FIELD(hop_limit), 230 .offset_words = 11, 231 .offset_bits = 24, 232 .size_bits = 8 }, 233 { PATH_REC_FIELD(traffic_class), 234 .offset_words = 12, 235 .offset_bits = 0, 236 .size_bits = 8 }, 237 { PATH_REC_FIELD(reversible), 238 .offset_words = 12, 239 .offset_bits = 8, 240 .size_bits = 1 }, 241 { PATH_REC_FIELD(numb_path), 242 .offset_words = 12, 243 .offset_bits = 9, 244 .size_bits = 7 }, 245 { PATH_REC_FIELD(pkey), 246 .offset_words = 12, 247 .offset_bits = 16, 248 .size_bits = 16 }, 249 { PATH_REC_FIELD(qos_class), 250 .offset_words = 13, 251 .offset_bits = 0, 252 .size_bits = 12 }, 253 { PATH_REC_FIELD(sl), 254 .offset_words = 13, 255 .offset_bits = 12, 256 .size_bits = 4 }, 257 { PATH_REC_FIELD(mtu_selector), 258 .offset_words = 13, 259 .offset_bits = 16, 260 .size_bits = 2 }, 261 { PATH_REC_FIELD(mtu), 262 .offset_words = 13, 263 .offset_bits = 18, 264 .size_bits = 6 }, 265 { PATH_REC_FIELD(rate_selector), 266 .offset_words = 13, 267 .offset_bits = 24, 268 .size_bits = 2 }, 269 { PATH_REC_FIELD(rate), 270 .offset_words = 13, 271 .offset_bits = 26, 272 .size_bits = 6 }, 273 { PATH_REC_FIELD(packet_life_time_selector), 274 .offset_words = 14, 275 .offset_bits = 0, 276 .size_bits = 2 }, 277 { PATH_REC_FIELD(packet_life_time), 278 .offset_words = 14, 279 .offset_bits = 2, 280 .size_bits = 6 }, 281 { PATH_REC_FIELD(preference), 282 .offset_words = 14, 283 .offset_bits = 8, 284 .size_bits = 8 }, 285 { RESERVED, 286 .offset_words = 14, 287 .offset_bits = 16, 288 .size_bits = 48 }, 289 }; 290 291 #define OPA_PATH_REC_FIELD(field) \ 292 .struct_offset_bytes = \ 293 offsetof(struct sa_path_rec, field), \ 294 .struct_size_bytes = \ 295 sizeof((struct sa_path_rec *)0)->field, \ 296 .field_name = "sa_path_rec:" #field 297 298 static const struct ib_field opa_path_rec_table[] = { 299 { OPA_PATH_REC_FIELD(opa.service_id), 300 .offset_words = 0, 301 .offset_bits = 0, 302 .size_bits = 64 }, 303 { OPA_PATH_REC_FIELD(dgid), 304 .offset_words = 2, 305 .offset_bits = 0, 306 .size_bits = 128 }, 307 { OPA_PATH_REC_FIELD(sgid), 308 .offset_words = 6, 309 .offset_bits = 0, 310 .size_bits = 128 }, 311 { OPA_PATH_REC_FIELD(opa.dlid), 312 .offset_words = 10, 313 .offset_bits = 0, 314 .size_bits = 32 }, 315 { OPA_PATH_REC_FIELD(opa.slid), 316 .offset_words = 11, 317 .offset_bits = 0, 318 .size_bits = 32 }, 319 { OPA_PATH_REC_FIELD(opa.raw_traffic), 320 .offset_words = 12, 321 .offset_bits = 0, 322 .size_bits = 1 }, 323 { RESERVED, 324 .offset_words = 12, 325 .offset_bits = 1, 326 .size_bits = 3 }, 327 { OPA_PATH_REC_FIELD(flow_label), 328 .offset_words = 12, 329 .offset_bits = 4, 330 .size_bits = 20 }, 331 { OPA_PATH_REC_FIELD(hop_limit), 332 .offset_words = 12, 333 .offset_bits = 24, 334 .size_bits = 8 }, 335 { OPA_PATH_REC_FIELD(traffic_class), 336 .offset_words = 13, 337 .offset_bits = 0, 338 .size_bits = 8 }, 339 { OPA_PATH_REC_FIELD(reversible), 340 .offset_words = 13, 341 .offset_bits = 8, 342 .size_bits = 1 }, 343 { OPA_PATH_REC_FIELD(numb_path), 344 .offset_words = 13, 345 .offset_bits = 9, 346 .size_bits = 7 }, 347 { OPA_PATH_REC_FIELD(pkey), 348 .offset_words = 13, 349 .offset_bits = 16, 350 .size_bits = 16 }, 351 { OPA_PATH_REC_FIELD(opa.l2_8B), 352 .offset_words = 14, 353 .offset_bits = 0, 354 .size_bits = 1 }, 355 { OPA_PATH_REC_FIELD(opa.l2_10B), 356 .offset_words = 14, 357 .offset_bits = 1, 358 .size_bits = 1 }, 359 { OPA_PATH_REC_FIELD(opa.l2_9B), 360 .offset_words = 14, 361 .offset_bits = 2, 362 .size_bits = 1 }, 363 { OPA_PATH_REC_FIELD(opa.l2_16B), 364 .offset_words = 14, 365 .offset_bits = 3, 366 .size_bits = 1 }, 367 { RESERVED, 368 .offset_words = 14, 369 .offset_bits = 4, 370 .size_bits = 2 }, 371 { OPA_PATH_REC_FIELD(opa.qos_type), 372 .offset_words = 14, 373 .offset_bits = 6, 374 .size_bits = 2 }, 375 { OPA_PATH_REC_FIELD(opa.qos_priority), 376 .offset_words = 14, 377 .offset_bits = 8, 378 .size_bits = 8 }, 379 { RESERVED, 380 .offset_words = 14, 381 .offset_bits = 16, 382 .size_bits = 3 }, 383 { OPA_PATH_REC_FIELD(sl), 384 .offset_words = 14, 385 .offset_bits = 19, 386 .size_bits = 5 }, 387 { RESERVED, 388 .offset_words = 14, 389 .offset_bits = 24, 390 .size_bits = 8 }, 391 { OPA_PATH_REC_FIELD(mtu_selector), 392 .offset_words = 15, 393 .offset_bits = 0, 394 .size_bits = 2 }, 395 { OPA_PATH_REC_FIELD(mtu), 396 .offset_words = 15, 397 .offset_bits = 2, 398 .size_bits = 6 }, 399 { OPA_PATH_REC_FIELD(rate_selector), 400 .offset_words = 15, 401 .offset_bits = 8, 402 .size_bits = 2 }, 403 { OPA_PATH_REC_FIELD(rate), 404 .offset_words = 15, 405 .offset_bits = 10, 406 .size_bits = 6 }, 407 { OPA_PATH_REC_FIELD(packet_life_time_selector), 408 .offset_words = 15, 409 .offset_bits = 16, 410 .size_bits = 2 }, 411 { OPA_PATH_REC_FIELD(packet_life_time), 412 .offset_words = 15, 413 .offset_bits = 18, 414 .size_bits = 6 }, 415 { OPA_PATH_REC_FIELD(preference), 416 .offset_words = 15, 417 .offset_bits = 24, 418 .size_bits = 8 }, 419 }; 420 421 #define MCMEMBER_REC_FIELD(field) \ 422 .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \ 423 .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \ 424 .field_name = "sa_mcmember_rec:" #field 425 426 static const struct ib_field mcmember_rec_table[] = { 427 { MCMEMBER_REC_FIELD(mgid), 428 .offset_words = 0, 429 .offset_bits = 0, 430 .size_bits = 128 }, 431 { MCMEMBER_REC_FIELD(port_gid), 432 .offset_words = 4, 433 .offset_bits = 0, 434 .size_bits = 128 }, 435 { MCMEMBER_REC_FIELD(qkey), 436 .offset_words = 8, 437 .offset_bits = 0, 438 .size_bits = 32 }, 439 { MCMEMBER_REC_FIELD(mlid), 440 .offset_words = 9, 441 .offset_bits = 0, 442 .size_bits = 16 }, 443 { MCMEMBER_REC_FIELD(mtu_selector), 444 .offset_words = 9, 445 .offset_bits = 16, 446 .size_bits = 2 }, 447 { MCMEMBER_REC_FIELD(mtu), 448 .offset_words = 9, 449 .offset_bits = 18, 450 .size_bits = 6 }, 451 { MCMEMBER_REC_FIELD(traffic_class), 452 .offset_words = 9, 453 .offset_bits = 24, 454 .size_bits = 8 }, 455 { MCMEMBER_REC_FIELD(pkey), 456 .offset_words = 10, 457 .offset_bits = 0, 458 .size_bits = 16 }, 459 { MCMEMBER_REC_FIELD(rate_selector), 460 .offset_words = 10, 461 .offset_bits = 16, 462 .size_bits = 2 }, 463 { MCMEMBER_REC_FIELD(rate), 464 .offset_words = 10, 465 .offset_bits = 18, 466 .size_bits = 6 }, 467 { MCMEMBER_REC_FIELD(packet_life_time_selector), 468 .offset_words = 10, 469 .offset_bits = 24, 470 .size_bits = 2 }, 471 { MCMEMBER_REC_FIELD(packet_life_time), 472 .offset_words = 10, 473 .offset_bits = 26, 474 .size_bits = 6 }, 475 { MCMEMBER_REC_FIELD(sl), 476 .offset_words = 11, 477 .offset_bits = 0, 478 .size_bits = 4 }, 479 { MCMEMBER_REC_FIELD(flow_label), 480 .offset_words = 11, 481 .offset_bits = 4, 482 .size_bits = 20 }, 483 { MCMEMBER_REC_FIELD(hop_limit), 484 .offset_words = 11, 485 .offset_bits = 24, 486 .size_bits = 8 }, 487 { MCMEMBER_REC_FIELD(scope), 488 .offset_words = 12, 489 .offset_bits = 0, 490 .size_bits = 4 }, 491 { MCMEMBER_REC_FIELD(join_state), 492 .offset_words = 12, 493 .offset_bits = 4, 494 .size_bits = 4 }, 495 { MCMEMBER_REC_FIELD(proxy_join), 496 .offset_words = 12, 497 .offset_bits = 8, 498 .size_bits = 1 }, 499 { RESERVED, 500 .offset_words = 12, 501 .offset_bits = 9, 502 .size_bits = 23 }, 503 }; 504 505 #define SERVICE_REC_FIELD(field) \ 506 .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \ 507 .struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \ 508 .field_name = "sa_service_rec:" #field 509 510 static const struct ib_field service_rec_table[] = { 511 { SERVICE_REC_FIELD(id), 512 .offset_words = 0, 513 .offset_bits = 0, 514 .size_bits = 64 }, 515 { SERVICE_REC_FIELD(gid), 516 .offset_words = 2, 517 .offset_bits = 0, 518 .size_bits = 128 }, 519 { SERVICE_REC_FIELD(pkey), 520 .offset_words = 6, 521 .offset_bits = 0, 522 .size_bits = 16 }, 523 { SERVICE_REC_FIELD(lease), 524 .offset_words = 7, 525 .offset_bits = 0, 526 .size_bits = 32 }, 527 { SERVICE_REC_FIELD(key), 528 .offset_words = 8, 529 .offset_bits = 0, 530 .size_bits = 128 }, 531 { SERVICE_REC_FIELD(name), 532 .offset_words = 12, 533 .offset_bits = 0, 534 .size_bits = 64*8 }, 535 { SERVICE_REC_FIELD(data8), 536 .offset_words = 28, 537 .offset_bits = 0, 538 .size_bits = 16*8 }, 539 { SERVICE_REC_FIELD(data16), 540 .offset_words = 32, 541 .offset_bits = 0, 542 .size_bits = 8*16 }, 543 { SERVICE_REC_FIELD(data32), 544 .offset_words = 36, 545 .offset_bits = 0, 546 .size_bits = 4*32 }, 547 { SERVICE_REC_FIELD(data64), 548 .offset_words = 40, 549 .offset_bits = 0, 550 .size_bits = 2*64 }, 551 }; 552 553 #define CLASSPORTINFO_REC_FIELD(field) \ 554 .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \ 555 .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \ 556 .field_name = "ib_class_port_info:" #field 557 558 static const struct ib_field ib_classport_info_rec_table[] = { 559 { CLASSPORTINFO_REC_FIELD(base_version), 560 .offset_words = 0, 561 .offset_bits = 0, 562 .size_bits = 8 }, 563 { CLASSPORTINFO_REC_FIELD(class_version), 564 .offset_words = 0, 565 .offset_bits = 8, 566 .size_bits = 8 }, 567 { CLASSPORTINFO_REC_FIELD(capability_mask), 568 .offset_words = 0, 569 .offset_bits = 16, 570 .size_bits = 16 }, 571 { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time), 572 .offset_words = 1, 573 .offset_bits = 0, 574 .size_bits = 32 }, 575 { CLASSPORTINFO_REC_FIELD(redirect_gid), 576 .offset_words = 2, 577 .offset_bits = 0, 578 .size_bits = 128 }, 579 { CLASSPORTINFO_REC_FIELD(redirect_tcslfl), 580 .offset_words = 6, 581 .offset_bits = 0, 582 .size_bits = 32 }, 583 { CLASSPORTINFO_REC_FIELD(redirect_lid), 584 .offset_words = 7, 585 .offset_bits = 0, 586 .size_bits = 16 }, 587 { CLASSPORTINFO_REC_FIELD(redirect_pkey), 588 .offset_words = 7, 589 .offset_bits = 16, 590 .size_bits = 16 }, 591 592 { CLASSPORTINFO_REC_FIELD(redirect_qp), 593 .offset_words = 8, 594 .offset_bits = 0, 595 .size_bits = 32 }, 596 { CLASSPORTINFO_REC_FIELD(redirect_qkey), 597 .offset_words = 9, 598 .offset_bits = 0, 599 .size_bits = 32 }, 600 601 { CLASSPORTINFO_REC_FIELD(trap_gid), 602 .offset_words = 10, 603 .offset_bits = 0, 604 .size_bits = 128 }, 605 { CLASSPORTINFO_REC_FIELD(trap_tcslfl), 606 .offset_words = 14, 607 .offset_bits = 0, 608 .size_bits = 32 }, 609 610 { CLASSPORTINFO_REC_FIELD(trap_lid), 611 .offset_words = 15, 612 .offset_bits = 0, 613 .size_bits = 16 }, 614 { CLASSPORTINFO_REC_FIELD(trap_pkey), 615 .offset_words = 15, 616 .offset_bits = 16, 617 .size_bits = 16 }, 618 619 { CLASSPORTINFO_REC_FIELD(trap_hlqp), 620 .offset_words = 16, 621 .offset_bits = 0, 622 .size_bits = 32 }, 623 { CLASSPORTINFO_REC_FIELD(trap_qkey), 624 .offset_words = 17, 625 .offset_bits = 0, 626 .size_bits = 32 }, 627 }; 628 629 #define OPA_CLASSPORTINFO_REC_FIELD(field) \ 630 .struct_offset_bytes =\ 631 offsetof(struct opa_class_port_info, field), \ 632 .struct_size_bytes = \ 633 sizeof((struct opa_class_port_info *)0)->field, \ 634 .field_name = "opa_class_port_info:" #field 635 636 static const struct ib_field opa_classport_info_rec_table[] = { 637 { OPA_CLASSPORTINFO_REC_FIELD(base_version), 638 .offset_words = 0, 639 .offset_bits = 0, 640 .size_bits = 8 }, 641 { OPA_CLASSPORTINFO_REC_FIELD(class_version), 642 .offset_words = 0, 643 .offset_bits = 8, 644 .size_bits = 8 }, 645 { OPA_CLASSPORTINFO_REC_FIELD(cap_mask), 646 .offset_words = 0, 647 .offset_bits = 16, 648 .size_bits = 16 }, 649 { OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time), 650 .offset_words = 1, 651 .offset_bits = 0, 652 .size_bits = 32 }, 653 { OPA_CLASSPORTINFO_REC_FIELD(redirect_gid), 654 .offset_words = 2, 655 .offset_bits = 0, 656 .size_bits = 128 }, 657 { OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl), 658 .offset_words = 6, 659 .offset_bits = 0, 660 .size_bits = 32 }, 661 { OPA_CLASSPORTINFO_REC_FIELD(redirect_lid), 662 .offset_words = 7, 663 .offset_bits = 0, 664 .size_bits = 32 }, 665 { OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp), 666 .offset_words = 8, 667 .offset_bits = 0, 668 .size_bits = 32 }, 669 { OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey), 670 .offset_words = 9, 671 .offset_bits = 0, 672 .size_bits = 32 }, 673 { OPA_CLASSPORTINFO_REC_FIELD(trap_gid), 674 .offset_words = 10, 675 .offset_bits = 0, 676 .size_bits = 128 }, 677 { OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl), 678 .offset_words = 14, 679 .offset_bits = 0, 680 .size_bits = 32 }, 681 { OPA_CLASSPORTINFO_REC_FIELD(trap_lid), 682 .offset_words = 15, 683 .offset_bits = 0, 684 .size_bits = 32 }, 685 { OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp), 686 .offset_words = 16, 687 .offset_bits = 0, 688 .size_bits = 32 }, 689 { OPA_CLASSPORTINFO_REC_FIELD(trap_qkey), 690 .offset_words = 17, 691 .offset_bits = 0, 692 .size_bits = 32 }, 693 { OPA_CLASSPORTINFO_REC_FIELD(trap_pkey), 694 .offset_words = 18, 695 .offset_bits = 0, 696 .size_bits = 16 }, 697 { OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey), 698 .offset_words = 18, 699 .offset_bits = 16, 700 .size_bits = 16 }, 701 { OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd), 702 .offset_words = 19, 703 .offset_bits = 0, 704 .size_bits = 8 }, 705 { RESERVED, 706 .offset_words = 19, 707 .offset_bits = 8, 708 .size_bits = 24 }, 709 }; 710 711 #define GUIDINFO_REC_FIELD(field) \ 712 .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \ 713 .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \ 714 .field_name = "sa_guidinfo_rec:" #field 715 716 static const struct ib_field guidinfo_rec_table[] = { 717 { GUIDINFO_REC_FIELD(lid), 718 .offset_words = 0, 719 .offset_bits = 0, 720 .size_bits = 16 }, 721 { GUIDINFO_REC_FIELD(block_num), 722 .offset_words = 0, 723 .offset_bits = 16, 724 .size_bits = 8 }, 725 { GUIDINFO_REC_FIELD(res1), 726 .offset_words = 0, 727 .offset_bits = 24, 728 .size_bits = 8 }, 729 { GUIDINFO_REC_FIELD(res2), 730 .offset_words = 1, 731 .offset_bits = 0, 732 .size_bits = 32 }, 733 { GUIDINFO_REC_FIELD(guid_info_list), 734 .offset_words = 2, 735 .offset_bits = 0, 736 .size_bits = 512 }, 737 }; 738 739 static inline void ib_sa_disable_local_svc(struct ib_sa_query *query) 740 { 741 query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE; 742 } 743 744 static inline int ib_sa_query_cancelled(struct ib_sa_query *query) 745 { 746 return (query->flags & IB_SA_CANCEL); 747 } 748 749 static void ib_nl_set_path_rec_attrs(struct sk_buff *skb, 750 struct ib_sa_query *query) 751 { 752 struct sa_path_rec *sa_rec = query->mad_buf->context[1]; 753 struct ib_sa_mad *mad = query->mad_buf->mad; 754 ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask; 755 u16 val16; 756 u64 val64; 757 struct rdma_ls_resolve_header *header; 758 759 query->mad_buf->context[1] = NULL; 760 761 /* Construct the family header first */ 762 header = (struct rdma_ls_resolve_header *) 763 skb_put(skb, NLMSG_ALIGN(sizeof(*header))); 764 memcpy(header->device_name, query->port->agent->device->name, 765 LS_DEVICE_NAME_MAX); 766 header->port_num = query->port->port_num; 767 768 if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) && 769 sa_rec->reversible != 0) 770 query->path_use = LS_RESOLVE_PATH_USE_GMP; 771 else 772 query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL; 773 header->path_use = query->path_use; 774 775 /* Now build the attributes */ 776 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) { 777 val64 = be64_to_cpu(sa_path_get_service_id(sa_rec)); 778 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID, 779 sizeof(val64), &val64); 780 } 781 if (comp_mask & IB_SA_PATH_REC_DGID) 782 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID, 783 sizeof(sa_rec->dgid), &sa_rec->dgid); 784 if (comp_mask & IB_SA_PATH_REC_SGID) 785 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID, 786 sizeof(sa_rec->sgid), &sa_rec->sgid); 787 if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS) 788 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS, 789 sizeof(sa_rec->traffic_class), &sa_rec->traffic_class); 790 791 if (comp_mask & IB_SA_PATH_REC_PKEY) { 792 val16 = be16_to_cpu(sa_rec->pkey); 793 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY, 794 sizeof(val16), &val16); 795 } 796 if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) { 797 val16 = be16_to_cpu(sa_rec->qos_class); 798 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS, 799 sizeof(val16), &val16); 800 } 801 } 802 803 static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask) 804 { 805 int len = 0; 806 807 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) 808 len += nla_total_size(sizeof(u64)); 809 if (comp_mask & IB_SA_PATH_REC_DGID) 810 len += nla_total_size(sizeof(struct rdma_nla_ls_gid)); 811 if (comp_mask & IB_SA_PATH_REC_SGID) 812 len += nla_total_size(sizeof(struct rdma_nla_ls_gid)); 813 if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS) 814 len += nla_total_size(sizeof(u8)); 815 if (comp_mask & IB_SA_PATH_REC_PKEY) 816 len += nla_total_size(sizeof(u16)); 817 if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) 818 len += nla_total_size(sizeof(u16)); 819 820 /* 821 * Make sure that at least some of the required comp_mask bits are 822 * set. 823 */ 824 if (WARN_ON(len == 0)) 825 return len; 826 827 /* Add the family header */ 828 len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header)); 829 830 return len; 831 } 832 833 static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) 834 { 835 struct sk_buff *skb = NULL; 836 struct nlmsghdr *nlh; 837 void *data; 838 int ret = 0; 839 struct ib_sa_mad *mad; 840 int len; 841 842 mad = query->mad_buf->mad; 843 len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask); 844 if (len <= 0) 845 return -EMSGSIZE; 846 847 skb = nlmsg_new(len, gfp_mask); 848 if (!skb) 849 return -ENOMEM; 850 851 /* Put nlmsg header only for now */ 852 data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS, 853 RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST); 854 if (!data) { 855 nlmsg_free(skb); 856 return -EMSGSIZE; 857 } 858 859 /* Add attributes */ 860 ib_nl_set_path_rec_attrs(skb, query); 861 862 /* Repair the nlmsg header length */ 863 nlmsg_end(skb, nlh); 864 865 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask); 866 if (!ret) 867 ret = len; 868 else 869 ret = 0; 870 871 return ret; 872 } 873 874 static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) 875 { 876 unsigned long flags; 877 unsigned long delay; 878 int ret; 879 880 INIT_LIST_HEAD(&query->list); 881 query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); 882 883 /* Put the request on the list first.*/ 884 spin_lock_irqsave(&ib_nl_request_lock, flags); 885 delay = msecs_to_jiffies(sa_local_svc_timeout_ms); 886 query->timeout = delay + jiffies; 887 list_add_tail(&query->list, &ib_nl_request_list); 888 /* Start the timeout if this is the only request */ 889 if (ib_nl_request_list.next == &query->list) 890 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); 891 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 892 893 ret = ib_nl_send_msg(query, gfp_mask); 894 if (ret <= 0) { 895 ret = -EIO; 896 /* Remove the request */ 897 spin_lock_irqsave(&ib_nl_request_lock, flags); 898 list_del(&query->list); 899 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 900 } else { 901 ret = 0; 902 } 903 904 return ret; 905 } 906 907 static int ib_nl_cancel_request(struct ib_sa_query *query) 908 { 909 unsigned long flags; 910 struct ib_sa_query *wait_query; 911 int found = 0; 912 913 spin_lock_irqsave(&ib_nl_request_lock, flags); 914 list_for_each_entry(wait_query, &ib_nl_request_list, list) { 915 /* Let the timeout to take care of the callback */ 916 if (query == wait_query) { 917 query->flags |= IB_SA_CANCEL; 918 query->timeout = jiffies; 919 list_move(&query->list, &ib_nl_request_list); 920 found = 1; 921 mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1); 922 break; 923 } 924 } 925 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 926 927 return found; 928 } 929 930 static void send_handler(struct ib_mad_agent *agent, 931 struct ib_mad_send_wc *mad_send_wc); 932 933 static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query, 934 const struct nlmsghdr *nlh) 935 { 936 struct ib_mad_send_wc mad_send_wc; 937 struct ib_sa_mad *mad = NULL; 938 const struct nlattr *head, *curr; 939 struct ib_path_rec_data *rec; 940 int len, rem; 941 u32 mask = 0; 942 int status = -EIO; 943 944 if (query->callback) { 945 head = (const struct nlattr *) nlmsg_data(nlh); 946 len = nlmsg_len(nlh); 947 switch (query->path_use) { 948 case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL: 949 mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND; 950 break; 951 952 case LS_RESOLVE_PATH_USE_ALL: 953 case LS_RESOLVE_PATH_USE_GMP: 954 default: 955 mask = IB_PATH_PRIMARY | IB_PATH_GMP | 956 IB_PATH_BIDIRECTIONAL; 957 break; 958 } 959 nla_for_each_attr(curr, head, len, rem) { 960 if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) { 961 rec = nla_data(curr); 962 /* 963 * Get the first one. In the future, we may 964 * need to get up to 6 pathrecords. 965 */ 966 if ((rec->flags & mask) == mask) { 967 mad = query->mad_buf->mad; 968 mad->mad_hdr.method |= 969 IB_MGMT_METHOD_RESP; 970 memcpy(mad->data, rec->path_rec, 971 sizeof(rec->path_rec)); 972 status = 0; 973 break; 974 } 975 } 976 } 977 query->callback(query, status, mad); 978 } 979 980 mad_send_wc.send_buf = query->mad_buf; 981 mad_send_wc.status = IB_WC_SUCCESS; 982 send_handler(query->mad_buf->mad_agent, &mad_send_wc); 983 } 984 985 static void ib_nl_request_timeout(struct work_struct *work) 986 { 987 unsigned long flags; 988 struct ib_sa_query *query; 989 unsigned long delay; 990 struct ib_mad_send_wc mad_send_wc; 991 int ret; 992 993 spin_lock_irqsave(&ib_nl_request_lock, flags); 994 while (!list_empty(&ib_nl_request_list)) { 995 query = list_entry(ib_nl_request_list.next, 996 struct ib_sa_query, list); 997 998 if (time_after(query->timeout, jiffies)) { 999 delay = query->timeout - jiffies; 1000 if ((long)delay <= 0) 1001 delay = 1; 1002 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); 1003 break; 1004 } 1005 1006 list_del(&query->list); 1007 ib_sa_disable_local_svc(query); 1008 /* Hold the lock to protect against query cancellation */ 1009 if (ib_sa_query_cancelled(query)) 1010 ret = -1; 1011 else 1012 ret = ib_post_send_mad(query->mad_buf, NULL); 1013 if (ret) { 1014 mad_send_wc.send_buf = query->mad_buf; 1015 mad_send_wc.status = IB_WC_WR_FLUSH_ERR; 1016 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 1017 send_handler(query->port->agent, &mad_send_wc); 1018 spin_lock_irqsave(&ib_nl_request_lock, flags); 1019 } 1020 } 1021 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 1022 } 1023 1024 int ib_nl_handle_set_timeout(struct sk_buff *skb, 1025 struct netlink_callback *cb) 1026 { 1027 const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh; 1028 int timeout, delta, abs_delta; 1029 const struct nlattr *attr; 1030 unsigned long flags; 1031 struct ib_sa_query *query; 1032 long delay = 0; 1033 struct nlattr *tb[LS_NLA_TYPE_MAX]; 1034 int ret; 1035 1036 if (!(nlh->nlmsg_flags & NLM_F_REQUEST) || 1037 !(NETLINK_CB(skb).sk) || 1038 !netlink_capable(skb, CAP_NET_ADMIN)) 1039 return -EPERM; 1040 1041 ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), 1042 nlmsg_len(nlh), ib_nl_policy, NULL); 1043 attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT]; 1044 if (ret || !attr) 1045 goto settimeout_out; 1046 1047 timeout = *(int *) nla_data(attr); 1048 if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN) 1049 timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN; 1050 if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX) 1051 timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX; 1052 1053 delta = timeout - sa_local_svc_timeout_ms; 1054 if (delta < 0) 1055 abs_delta = -delta; 1056 else 1057 abs_delta = delta; 1058 1059 if (delta != 0) { 1060 spin_lock_irqsave(&ib_nl_request_lock, flags); 1061 sa_local_svc_timeout_ms = timeout; 1062 list_for_each_entry(query, &ib_nl_request_list, list) { 1063 if (delta < 0 && abs_delta > query->timeout) 1064 query->timeout = 0; 1065 else 1066 query->timeout += delta; 1067 1068 /* Get the new delay from the first entry */ 1069 if (!delay) { 1070 delay = query->timeout - jiffies; 1071 if (delay <= 0) 1072 delay = 1; 1073 } 1074 } 1075 if (delay) 1076 mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1077 (unsigned long)delay); 1078 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 1079 } 1080 1081 settimeout_out: 1082 return skb->len; 1083 } 1084 1085 static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh) 1086 { 1087 struct nlattr *tb[LS_NLA_TYPE_MAX]; 1088 int ret; 1089 1090 if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR) 1091 return 0; 1092 1093 ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), 1094 nlmsg_len(nlh), ib_nl_policy, NULL); 1095 if (ret) 1096 return 0; 1097 1098 return 1; 1099 } 1100 1101 int ib_nl_handle_resolve_resp(struct sk_buff *skb, 1102 struct netlink_callback *cb) 1103 { 1104 const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh; 1105 unsigned long flags; 1106 struct ib_sa_query *query; 1107 struct ib_mad_send_buf *send_buf; 1108 struct ib_mad_send_wc mad_send_wc; 1109 int found = 0; 1110 int ret; 1111 1112 if ((nlh->nlmsg_flags & NLM_F_REQUEST) || 1113 !(NETLINK_CB(skb).sk) || 1114 !netlink_capable(skb, CAP_NET_ADMIN)) 1115 return -EPERM; 1116 1117 spin_lock_irqsave(&ib_nl_request_lock, flags); 1118 list_for_each_entry(query, &ib_nl_request_list, list) { 1119 /* 1120 * If the query is cancelled, let the timeout routine 1121 * take care of it. 1122 */ 1123 if (nlh->nlmsg_seq == query->seq) { 1124 found = !ib_sa_query_cancelled(query); 1125 if (found) 1126 list_del(&query->list); 1127 break; 1128 } 1129 } 1130 1131 if (!found) { 1132 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 1133 goto resp_out; 1134 } 1135 1136 send_buf = query->mad_buf; 1137 1138 if (!ib_nl_is_good_resolve_resp(nlh)) { 1139 /* if the result is a failure, send out the packet via IB */ 1140 ib_sa_disable_local_svc(query); 1141 ret = ib_post_send_mad(query->mad_buf, NULL); 1142 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 1143 if (ret) { 1144 mad_send_wc.send_buf = send_buf; 1145 mad_send_wc.status = IB_WC_GENERAL_ERR; 1146 send_handler(query->port->agent, &mad_send_wc); 1147 } 1148 } else { 1149 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 1150 ib_nl_process_good_resolve_rsp(query, nlh); 1151 } 1152 1153 resp_out: 1154 return skb->len; 1155 } 1156 1157 static void free_sm_ah(struct kref *kref) 1158 { 1159 struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); 1160 1161 rdma_destroy_ah(sm_ah->ah); 1162 kfree(sm_ah); 1163 } 1164 1165 void ib_sa_register_client(struct ib_sa_client *client) 1166 { 1167 atomic_set(&client->users, 1); 1168 init_completion(&client->comp); 1169 } 1170 EXPORT_SYMBOL(ib_sa_register_client); 1171 1172 void ib_sa_unregister_client(struct ib_sa_client *client) 1173 { 1174 ib_sa_client_put(client); 1175 wait_for_completion(&client->comp); 1176 } 1177 EXPORT_SYMBOL(ib_sa_unregister_client); 1178 1179 /** 1180 * ib_sa_cancel_query - try to cancel an SA query 1181 * @id:ID of query to cancel 1182 * @query:query pointer to cancel 1183 * 1184 * Try to cancel an SA query. If the id and query don't match up or 1185 * the query has already completed, nothing is done. Otherwise the 1186 * query is canceled and will complete with a status of -EINTR. 1187 */ 1188 void ib_sa_cancel_query(int id, struct ib_sa_query *query) 1189 { 1190 unsigned long flags; 1191 struct ib_mad_agent *agent; 1192 struct ib_mad_send_buf *mad_buf; 1193 1194 spin_lock_irqsave(&idr_lock, flags); 1195 if (idr_find(&query_idr, id) != query) { 1196 spin_unlock_irqrestore(&idr_lock, flags); 1197 return; 1198 } 1199 agent = query->port->agent; 1200 mad_buf = query->mad_buf; 1201 spin_unlock_irqrestore(&idr_lock, flags); 1202 1203 /* 1204 * If the query is still on the netlink request list, schedule 1205 * it to be cancelled by the timeout routine. Otherwise, it has been 1206 * sent to the MAD layer and has to be cancelled from there. 1207 */ 1208 if (!ib_nl_cancel_request(query)) 1209 ib_cancel_mad(agent, mad_buf); 1210 } 1211 EXPORT_SYMBOL(ib_sa_cancel_query); 1212 1213 static u8 get_src_path_mask(struct ib_device *device, u8 port_num) 1214 { 1215 struct ib_sa_device *sa_dev; 1216 struct ib_sa_port *port; 1217 unsigned long flags; 1218 u8 src_path_mask; 1219 1220 sa_dev = ib_get_client_data(device, &sa_client); 1221 if (!sa_dev) 1222 return 0x7f; 1223 1224 port = &sa_dev->port[port_num - sa_dev->start_port]; 1225 spin_lock_irqsave(&port->ah_lock, flags); 1226 src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f; 1227 spin_unlock_irqrestore(&port->ah_lock, flags); 1228 1229 return src_path_mask; 1230 } 1231 1232 int ib_init_ah_from_path(struct ib_device *device, u8 port_num, 1233 struct sa_path_rec *rec, 1234 struct rdma_ah_attr *ah_attr) 1235 { 1236 int ret; 1237 u16 gid_index; 1238 int use_roce; 1239 struct net_device *ndev = NULL; 1240 1241 memset(ah_attr, 0, sizeof *ah_attr); 1242 ah_attr->type = rdma_ah_find_type(device, port_num); 1243 1244 rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec))); 1245 rdma_ah_set_sl(ah_attr, rec->sl); 1246 rdma_ah_set_path_bits(ah_attr, be32_to_cpu(sa_path_get_slid(rec)) & 1247 get_src_path_mask(device, port_num)); 1248 rdma_ah_set_port_num(ah_attr, port_num); 1249 rdma_ah_set_static_rate(ah_attr, rec->rate); 1250 use_roce = rdma_cap_eth_ah(device, port_num); 1251 1252 if (use_roce) { 1253 struct net_device *idev; 1254 struct net_device *resolved_dev; 1255 struct rdma_dev_addr dev_addr = { 1256 .bound_dev_if = ((sa_path_get_ifindex(rec) >= 0) ? 1257 sa_path_get_ifindex(rec) : 0), 1258 .net = sa_path_get_ndev(rec) ? 1259 sa_path_get_ndev(rec) : 1260 &init_net 1261 }; 1262 union { 1263 struct sockaddr _sockaddr; 1264 struct sockaddr_in _sockaddr_in; 1265 struct sockaddr_in6 _sockaddr_in6; 1266 } sgid_addr, dgid_addr; 1267 1268 if (!device->get_netdev) 1269 return -EOPNOTSUPP; 1270 1271 rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid); 1272 rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid); 1273 1274 /* validate the route */ 1275 ret = rdma_resolve_ip_route(&sgid_addr._sockaddr, 1276 &dgid_addr._sockaddr, &dev_addr); 1277 if (ret) 1278 return ret; 1279 1280 if ((dev_addr.network == RDMA_NETWORK_IPV4 || 1281 dev_addr.network == RDMA_NETWORK_IPV6) && 1282 rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2) 1283 return -EINVAL; 1284 1285 idev = device->get_netdev(device, port_num); 1286 if (!idev) 1287 return -ENODEV; 1288 1289 resolved_dev = dev_get_by_index(dev_addr.net, 1290 dev_addr.bound_dev_if); 1291 if (resolved_dev->flags & IFF_LOOPBACK) { 1292 dev_put(resolved_dev); 1293 resolved_dev = idev; 1294 dev_hold(resolved_dev); 1295 } 1296 ndev = ib_get_ndev_from_path(rec); 1297 rcu_read_lock(); 1298 if ((ndev && ndev != resolved_dev) || 1299 (resolved_dev != idev && 1300 !rdma_is_upper_dev_rcu(idev, resolved_dev))) 1301 ret = -EHOSTUNREACH; 1302 rcu_read_unlock(); 1303 dev_put(idev); 1304 dev_put(resolved_dev); 1305 if (ret) { 1306 if (ndev) 1307 dev_put(ndev); 1308 return ret; 1309 } 1310 } 1311 1312 if (rec->hop_limit > 0 || use_roce) { 1313 enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec); 1314 1315 ret = ib_find_cached_gid_by_port(device, &rec->sgid, type, 1316 port_num, ndev, &gid_index); 1317 if (ret) { 1318 if (ndev) 1319 dev_put(ndev); 1320 return ret; 1321 } 1322 1323 rdma_ah_set_grh(ah_attr, &rec->dgid, 1324 be32_to_cpu(rec->flow_label), 1325 gid_index, rec->hop_limit, 1326 rec->traffic_class); 1327 if (ndev) 1328 dev_put(ndev); 1329 } 1330 1331 if (use_roce) { 1332 u8 *dmac = sa_path_get_dmac(rec); 1333 1334 if (!dmac) 1335 return -EINVAL; 1336 memcpy(ah_attr->roce.dmac, dmac, ETH_ALEN); 1337 } 1338 1339 return 0; 1340 } 1341 EXPORT_SYMBOL(ib_init_ah_from_path); 1342 1343 static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask) 1344 { 1345 unsigned long flags; 1346 1347 spin_lock_irqsave(&query->port->ah_lock, flags); 1348 if (!query->port->sm_ah) { 1349 spin_unlock_irqrestore(&query->port->ah_lock, flags); 1350 return -EAGAIN; 1351 } 1352 kref_get(&query->port->sm_ah->ref); 1353 query->sm_ah = query->port->sm_ah; 1354 spin_unlock_irqrestore(&query->port->ah_lock, flags); 1355 1356 query->mad_buf = ib_create_send_mad(query->port->agent, 1, 1357 query->sm_ah->pkey_index, 1358 0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA, 1359 gfp_mask, 1360 ((query->flags & IB_SA_QUERY_OPA) ? 1361 OPA_MGMT_BASE_VERSION : 1362 IB_MGMT_BASE_VERSION)); 1363 if (IS_ERR(query->mad_buf)) { 1364 kref_put(&query->sm_ah->ref, free_sm_ah); 1365 return -ENOMEM; 1366 } 1367 1368 query->mad_buf->ah = query->sm_ah->ah; 1369 1370 return 0; 1371 } 1372 1373 static void free_mad(struct ib_sa_query *query) 1374 { 1375 ib_free_send_mad(query->mad_buf); 1376 kref_put(&query->sm_ah->ref, free_sm_ah); 1377 } 1378 1379 static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent) 1380 { 1381 struct ib_sa_mad *mad = query->mad_buf->mad; 1382 unsigned long flags; 1383 1384 memset(mad, 0, sizeof *mad); 1385 1386 if (query->flags & IB_SA_QUERY_OPA) { 1387 mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION; 1388 mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION; 1389 } else { 1390 mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION; 1391 mad->mad_hdr.class_version = IB_SA_CLASS_VERSION; 1392 } 1393 mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM; 1394 spin_lock_irqsave(&tid_lock, flags); 1395 mad->mad_hdr.tid = 1396 cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++); 1397 spin_unlock_irqrestore(&tid_lock, flags); 1398 } 1399 1400 static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) 1401 { 1402 bool preload = gfpflags_allow_blocking(gfp_mask); 1403 unsigned long flags; 1404 int ret, id; 1405 1406 if (preload) 1407 idr_preload(gfp_mask); 1408 spin_lock_irqsave(&idr_lock, flags); 1409 1410 id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT); 1411 1412 spin_unlock_irqrestore(&idr_lock, flags); 1413 if (preload) 1414 idr_preload_end(); 1415 if (id < 0) 1416 return id; 1417 1418 query->mad_buf->timeout_ms = timeout_ms; 1419 query->mad_buf->context[0] = query; 1420 query->id = id; 1421 1422 if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) && 1423 (!(query->flags & IB_SA_QUERY_OPA))) { 1424 if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) { 1425 if (!ib_nl_make_request(query, gfp_mask)) 1426 return id; 1427 } 1428 ib_sa_disable_local_svc(query); 1429 } 1430 1431 ret = ib_post_send_mad(query->mad_buf, NULL); 1432 if (ret) { 1433 spin_lock_irqsave(&idr_lock, flags); 1434 idr_remove(&query_idr, id); 1435 spin_unlock_irqrestore(&idr_lock, flags); 1436 } 1437 1438 /* 1439 * It's not safe to dereference query any more, because the 1440 * send may already have completed and freed the query in 1441 * another context. 1442 */ 1443 return ret ? ret : id; 1444 } 1445 1446 void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec) 1447 { 1448 ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec); 1449 } 1450 EXPORT_SYMBOL(ib_sa_unpack_path); 1451 1452 void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute) 1453 { 1454 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute); 1455 } 1456 EXPORT_SYMBOL(ib_sa_pack_path); 1457 1458 static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client, 1459 struct ib_device *device, 1460 u8 port_num) 1461 { 1462 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); 1463 struct ib_sa_port *port; 1464 unsigned long flags; 1465 bool ret = false; 1466 1467 if (!sa_dev) 1468 return ret; 1469 1470 port = &sa_dev->port[port_num - sa_dev->start_port]; 1471 spin_lock_irqsave(&port->classport_lock, flags); 1472 if (!port->classport_info.valid) 1473 goto ret; 1474 1475 if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA) 1476 ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) & 1477 OPA_CLASS_PORT_INFO_PR_SUPPORT; 1478 ret: 1479 spin_unlock_irqrestore(&port->classport_lock, flags); 1480 return ret; 1481 } 1482 1483 enum opa_pr_supported { 1484 PR_NOT_SUPPORTED, 1485 PR_OPA_SUPPORTED, 1486 PR_IB_SUPPORTED 1487 }; 1488 1489 /** 1490 * Check if current PR query can be an OPA query. 1491 * Retuns PR_NOT_SUPPORTED if a path record query is not 1492 * possible, PR_OPA_SUPPORTED if an OPA path record query 1493 * is possible and PR_IB_SUPPORTED if an IB path record 1494 * query is possible. 1495 */ 1496 static int opa_pr_query_possible(struct ib_sa_client *client, 1497 struct ib_device *device, 1498 u8 port_num, 1499 struct sa_path_rec *rec) 1500 { 1501 struct ib_port_attr port_attr; 1502 1503 if (ib_query_port(device, port_num, &port_attr)) 1504 return PR_NOT_SUPPORTED; 1505 1506 if (ib_sa_opa_pathrecord_support(client, device, port_num)) 1507 return PR_OPA_SUPPORTED; 1508 1509 if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) 1510 return PR_NOT_SUPPORTED; 1511 else 1512 return PR_IB_SUPPORTED; 1513 } 1514 1515 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query, 1516 int status, 1517 struct ib_sa_mad *mad) 1518 { 1519 struct ib_sa_path_query *query = 1520 container_of(sa_query, struct ib_sa_path_query, sa_query); 1521 1522 if (mad) { 1523 struct sa_path_rec rec; 1524 1525 if (sa_query->flags & IB_SA_QUERY_OPA) { 1526 ib_unpack(opa_path_rec_table, 1527 ARRAY_SIZE(opa_path_rec_table), 1528 mad->data, &rec); 1529 rec.rec_type = SA_PATH_REC_TYPE_OPA; 1530 query->callback(status, &rec, query->context); 1531 } else { 1532 ib_unpack(path_rec_table, 1533 ARRAY_SIZE(path_rec_table), 1534 mad->data, &rec); 1535 rec.rec_type = SA_PATH_REC_TYPE_IB; 1536 sa_path_set_ndev(&rec, NULL); 1537 sa_path_set_ifindex(&rec, 0); 1538 sa_path_set_dmac_zero(&rec); 1539 1540 if (query->conv_pr) { 1541 struct sa_path_rec opa; 1542 1543 memset(&opa, 0, sizeof(struct sa_path_rec)); 1544 sa_convert_path_ib_to_opa(&opa, &rec); 1545 query->callback(status, &opa, query->context); 1546 } else { 1547 query->callback(status, &rec, query->context); 1548 } 1549 } 1550 } else 1551 query->callback(status, NULL, query->context); 1552 } 1553 1554 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) 1555 { 1556 struct ib_sa_path_query *query = 1557 container_of(sa_query, struct ib_sa_path_query, sa_query); 1558 1559 kfree(query->conv_pr); 1560 kfree(query); 1561 } 1562 1563 /** 1564 * ib_sa_path_rec_get - Start a Path get query 1565 * @client:SA client 1566 * @device:device to send query on 1567 * @port_num: port number to send query on 1568 * @rec:Path Record to send in query 1569 * @comp_mask:component mask to send in query 1570 * @timeout_ms:time to wait for response 1571 * @gfp_mask:GFP mask to use for internal allocations 1572 * @callback:function called when query completes, times out or is 1573 * canceled 1574 * @context:opaque user context passed to callback 1575 * @sa_query:query context, used to cancel query 1576 * 1577 * Send a Path Record Get query to the SA to look up a path. The 1578 * callback function will be called when the query completes (or 1579 * fails); status is 0 for a successful response, -EINTR if the query 1580 * is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error 1581 * occurred sending the query. The resp parameter of the callback is 1582 * only valid if status is 0. 1583 * 1584 * If the return value of ib_sa_path_rec_get() is negative, it is an 1585 * error code. Otherwise it is a query ID that can be used to cancel 1586 * the query. 1587 */ 1588 int ib_sa_path_rec_get(struct ib_sa_client *client, 1589 struct ib_device *device, u8 port_num, 1590 struct sa_path_rec *rec, 1591 ib_sa_comp_mask comp_mask, 1592 int timeout_ms, gfp_t gfp_mask, 1593 void (*callback)(int status, 1594 struct sa_path_rec *resp, 1595 void *context), 1596 void *context, 1597 struct ib_sa_query **sa_query) 1598 { 1599 struct ib_sa_path_query *query; 1600 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); 1601 struct ib_sa_port *port; 1602 struct ib_mad_agent *agent; 1603 struct ib_sa_mad *mad; 1604 enum opa_pr_supported status; 1605 int ret; 1606 1607 if (!sa_dev) 1608 return -ENODEV; 1609 1610 if ((rec->rec_type != SA_PATH_REC_TYPE_IB) && 1611 (rec->rec_type != SA_PATH_REC_TYPE_OPA)) 1612 return -EINVAL; 1613 1614 port = &sa_dev->port[port_num - sa_dev->start_port]; 1615 agent = port->agent; 1616 1617 query = kzalloc(sizeof(*query), gfp_mask); 1618 if (!query) 1619 return -ENOMEM; 1620 1621 query->sa_query.port = port; 1622 if (rec->rec_type == SA_PATH_REC_TYPE_OPA) { 1623 status = opa_pr_query_possible(client, device, port_num, rec); 1624 if (status == PR_NOT_SUPPORTED) { 1625 ret = -EINVAL; 1626 goto err1; 1627 } else if (status == PR_OPA_SUPPORTED) { 1628 query->sa_query.flags |= IB_SA_QUERY_OPA; 1629 } else { 1630 query->conv_pr = 1631 kmalloc(sizeof(*query->conv_pr), gfp_mask); 1632 if (!query->conv_pr) { 1633 ret = -ENOMEM; 1634 goto err1; 1635 } 1636 } 1637 } 1638 1639 ret = alloc_mad(&query->sa_query, gfp_mask); 1640 if (ret) 1641 goto err2; 1642 1643 ib_sa_client_get(client); 1644 query->sa_query.client = client; 1645 query->callback = callback; 1646 query->context = context; 1647 1648 mad = query->sa_query.mad_buf->mad; 1649 init_mad(&query->sa_query, agent); 1650 1651 query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL; 1652 query->sa_query.release = ib_sa_path_rec_release; 1653 mad->mad_hdr.method = IB_MGMT_METHOD_GET; 1654 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC); 1655 mad->sa_hdr.comp_mask = comp_mask; 1656 1657 if (query->sa_query.flags & IB_SA_QUERY_OPA) { 1658 ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table), 1659 rec, mad->data); 1660 } else if (query->conv_pr) { 1661 sa_convert_path_opa_to_ib(query->conv_pr, rec); 1662 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), 1663 query->conv_pr, mad->data); 1664 } else { 1665 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), 1666 rec, mad->data); 1667 } 1668 1669 *sa_query = &query->sa_query; 1670 1671 query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE; 1672 query->sa_query.mad_buf->context[1] = (query->conv_pr) ? 1673 query->conv_pr : rec; 1674 1675 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); 1676 if (ret < 0) 1677 goto err3; 1678 1679 return ret; 1680 1681 err3: 1682 *sa_query = NULL; 1683 ib_sa_client_put(query->sa_query.client); 1684 free_mad(&query->sa_query); 1685 err2: 1686 kfree(query->conv_pr); 1687 err1: 1688 kfree(query); 1689 return ret; 1690 } 1691 EXPORT_SYMBOL(ib_sa_path_rec_get); 1692 1693 static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query, 1694 int status, 1695 struct ib_sa_mad *mad) 1696 { 1697 struct ib_sa_service_query *query = 1698 container_of(sa_query, struct ib_sa_service_query, sa_query); 1699 1700 if (mad) { 1701 struct ib_sa_service_rec rec; 1702 1703 ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table), 1704 mad->data, &rec); 1705 query->callback(status, &rec, query->context); 1706 } else 1707 query->callback(status, NULL, query->context); 1708 } 1709 1710 static void ib_sa_service_rec_release(struct ib_sa_query *sa_query) 1711 { 1712 kfree(container_of(sa_query, struct ib_sa_service_query, sa_query)); 1713 } 1714 1715 /** 1716 * ib_sa_service_rec_query - Start Service Record operation 1717 * @client:SA client 1718 * @device:device to send request on 1719 * @port_num: port number to send request on 1720 * @method:SA method - should be get, set, or delete 1721 * @rec:Service Record to send in request 1722 * @comp_mask:component mask to send in request 1723 * @timeout_ms:time to wait for response 1724 * @gfp_mask:GFP mask to use for internal allocations 1725 * @callback:function called when request completes, times out or is 1726 * canceled 1727 * @context:opaque user context passed to callback 1728 * @sa_query:request context, used to cancel request 1729 * 1730 * Send a Service Record set/get/delete to the SA to register, 1731 * unregister or query a service record. 1732 * The callback function will be called when the request completes (or 1733 * fails); status is 0 for a successful response, -EINTR if the query 1734 * is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error 1735 * occurred sending the query. The resp parameter of the callback is 1736 * only valid if status is 0. 1737 * 1738 * If the return value of ib_sa_service_rec_query() is negative, it is an 1739 * error code. Otherwise it is a request ID that can be used to cancel 1740 * the query. 1741 */ 1742 int ib_sa_service_rec_query(struct ib_sa_client *client, 1743 struct ib_device *device, u8 port_num, u8 method, 1744 struct ib_sa_service_rec *rec, 1745 ib_sa_comp_mask comp_mask, 1746 int timeout_ms, gfp_t gfp_mask, 1747 void (*callback)(int status, 1748 struct ib_sa_service_rec *resp, 1749 void *context), 1750 void *context, 1751 struct ib_sa_query **sa_query) 1752 { 1753 struct ib_sa_service_query *query; 1754 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); 1755 struct ib_sa_port *port; 1756 struct ib_mad_agent *agent; 1757 struct ib_sa_mad *mad; 1758 int ret; 1759 1760 if (!sa_dev) 1761 return -ENODEV; 1762 1763 port = &sa_dev->port[port_num - sa_dev->start_port]; 1764 agent = port->agent; 1765 1766 if (method != IB_MGMT_METHOD_GET && 1767 method != IB_MGMT_METHOD_SET && 1768 method != IB_SA_METHOD_DELETE) 1769 return -EINVAL; 1770 1771 query = kzalloc(sizeof(*query), gfp_mask); 1772 if (!query) 1773 return -ENOMEM; 1774 1775 query->sa_query.port = port; 1776 ret = alloc_mad(&query->sa_query, gfp_mask); 1777 if (ret) 1778 goto err1; 1779 1780 ib_sa_client_get(client); 1781 query->sa_query.client = client; 1782 query->callback = callback; 1783 query->context = context; 1784 1785 mad = query->sa_query.mad_buf->mad; 1786 init_mad(&query->sa_query, agent); 1787 1788 query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL; 1789 query->sa_query.release = ib_sa_service_rec_release; 1790 mad->mad_hdr.method = method; 1791 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC); 1792 mad->sa_hdr.comp_mask = comp_mask; 1793 1794 ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table), 1795 rec, mad->data); 1796 1797 *sa_query = &query->sa_query; 1798 1799 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); 1800 if (ret < 0) 1801 goto err2; 1802 1803 return ret; 1804 1805 err2: 1806 *sa_query = NULL; 1807 ib_sa_client_put(query->sa_query.client); 1808 free_mad(&query->sa_query); 1809 1810 err1: 1811 kfree(query); 1812 return ret; 1813 } 1814 EXPORT_SYMBOL(ib_sa_service_rec_query); 1815 1816 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query, 1817 int status, 1818 struct ib_sa_mad *mad) 1819 { 1820 struct ib_sa_mcmember_query *query = 1821 container_of(sa_query, struct ib_sa_mcmember_query, sa_query); 1822 1823 if (mad) { 1824 struct ib_sa_mcmember_rec rec; 1825 1826 ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table), 1827 mad->data, &rec); 1828 query->callback(status, &rec, query->context); 1829 } else 1830 query->callback(status, NULL, query->context); 1831 } 1832 1833 static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query) 1834 { 1835 kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query)); 1836 } 1837 1838 int ib_sa_mcmember_rec_query(struct ib_sa_client *client, 1839 struct ib_device *device, u8 port_num, 1840 u8 method, 1841 struct ib_sa_mcmember_rec *rec, 1842 ib_sa_comp_mask comp_mask, 1843 int timeout_ms, gfp_t gfp_mask, 1844 void (*callback)(int status, 1845 struct ib_sa_mcmember_rec *resp, 1846 void *context), 1847 void *context, 1848 struct ib_sa_query **sa_query) 1849 { 1850 struct ib_sa_mcmember_query *query; 1851 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); 1852 struct ib_sa_port *port; 1853 struct ib_mad_agent *agent; 1854 struct ib_sa_mad *mad; 1855 int ret; 1856 1857 if (!sa_dev) 1858 return -ENODEV; 1859 1860 port = &sa_dev->port[port_num - sa_dev->start_port]; 1861 agent = port->agent; 1862 1863 query = kzalloc(sizeof(*query), gfp_mask); 1864 if (!query) 1865 return -ENOMEM; 1866 1867 query->sa_query.port = port; 1868 ret = alloc_mad(&query->sa_query, gfp_mask); 1869 if (ret) 1870 goto err1; 1871 1872 ib_sa_client_get(client); 1873 query->sa_query.client = client; 1874 query->callback = callback; 1875 query->context = context; 1876 1877 mad = query->sa_query.mad_buf->mad; 1878 init_mad(&query->sa_query, agent); 1879 1880 query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL; 1881 query->sa_query.release = ib_sa_mcmember_rec_release; 1882 mad->mad_hdr.method = method; 1883 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC); 1884 mad->sa_hdr.comp_mask = comp_mask; 1885 1886 ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table), 1887 rec, mad->data); 1888 1889 *sa_query = &query->sa_query; 1890 1891 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); 1892 if (ret < 0) 1893 goto err2; 1894 1895 return ret; 1896 1897 err2: 1898 *sa_query = NULL; 1899 ib_sa_client_put(query->sa_query.client); 1900 free_mad(&query->sa_query); 1901 1902 err1: 1903 kfree(query); 1904 return ret; 1905 } 1906 1907 /* Support GuidInfoRecord */ 1908 static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query, 1909 int status, 1910 struct ib_sa_mad *mad) 1911 { 1912 struct ib_sa_guidinfo_query *query = 1913 container_of(sa_query, struct ib_sa_guidinfo_query, sa_query); 1914 1915 if (mad) { 1916 struct ib_sa_guidinfo_rec rec; 1917 1918 ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), 1919 mad->data, &rec); 1920 query->callback(status, &rec, query->context); 1921 } else 1922 query->callback(status, NULL, query->context); 1923 } 1924 1925 static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query) 1926 { 1927 kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query)); 1928 } 1929 1930 int ib_sa_guid_info_rec_query(struct ib_sa_client *client, 1931 struct ib_device *device, u8 port_num, 1932 struct ib_sa_guidinfo_rec *rec, 1933 ib_sa_comp_mask comp_mask, u8 method, 1934 int timeout_ms, gfp_t gfp_mask, 1935 void (*callback)(int status, 1936 struct ib_sa_guidinfo_rec *resp, 1937 void *context), 1938 void *context, 1939 struct ib_sa_query **sa_query) 1940 { 1941 struct ib_sa_guidinfo_query *query; 1942 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); 1943 struct ib_sa_port *port; 1944 struct ib_mad_agent *agent; 1945 struct ib_sa_mad *mad; 1946 int ret; 1947 1948 if (!sa_dev) 1949 return -ENODEV; 1950 1951 if (method != IB_MGMT_METHOD_GET && 1952 method != IB_MGMT_METHOD_SET && 1953 method != IB_SA_METHOD_DELETE) { 1954 return -EINVAL; 1955 } 1956 1957 port = &sa_dev->port[port_num - sa_dev->start_port]; 1958 agent = port->agent; 1959 1960 query = kzalloc(sizeof(*query), gfp_mask); 1961 if (!query) 1962 return -ENOMEM; 1963 1964 query->sa_query.port = port; 1965 ret = alloc_mad(&query->sa_query, gfp_mask); 1966 if (ret) 1967 goto err1; 1968 1969 ib_sa_client_get(client); 1970 query->sa_query.client = client; 1971 query->callback = callback; 1972 query->context = context; 1973 1974 mad = query->sa_query.mad_buf->mad; 1975 init_mad(&query->sa_query, agent); 1976 1977 query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL; 1978 query->sa_query.release = ib_sa_guidinfo_rec_release; 1979 1980 mad->mad_hdr.method = method; 1981 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC); 1982 mad->sa_hdr.comp_mask = comp_mask; 1983 1984 ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec, 1985 mad->data); 1986 1987 *sa_query = &query->sa_query; 1988 1989 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); 1990 if (ret < 0) 1991 goto err2; 1992 1993 return ret; 1994 1995 err2: 1996 *sa_query = NULL; 1997 ib_sa_client_put(query->sa_query.client); 1998 free_mad(&query->sa_query); 1999 2000 err1: 2001 kfree(query); 2002 return ret; 2003 } 2004 EXPORT_SYMBOL(ib_sa_guid_info_rec_query); 2005 2006 bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client, 2007 struct ib_device *device, 2008 u8 port_num) 2009 { 2010 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); 2011 struct ib_sa_port *port; 2012 bool ret = false; 2013 unsigned long flags; 2014 2015 if (!sa_dev) 2016 return ret; 2017 2018 port = &sa_dev->port[port_num - sa_dev->start_port]; 2019 2020 spin_lock_irqsave(&port->classport_lock, flags); 2021 if ((port->classport_info.valid) && 2022 (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB)) 2023 ret = ib_get_cpi_capmask2(&port->classport_info.data.ib) 2024 & IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT; 2025 spin_unlock_irqrestore(&port->classport_lock, flags); 2026 return ret; 2027 } 2028 EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support); 2029 2030 struct ib_classport_info_context { 2031 struct completion done; 2032 struct ib_sa_query *sa_query; 2033 }; 2034 2035 static void ib_classportinfo_cb(void *context) 2036 { 2037 struct ib_classport_info_context *cb_ctx = context; 2038 2039 complete(&cb_ctx->done); 2040 } 2041 2042 static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query, 2043 int status, 2044 struct ib_sa_mad *mad) 2045 { 2046 unsigned long flags; 2047 struct ib_sa_classport_info_query *query = 2048 container_of(sa_query, struct ib_sa_classport_info_query, sa_query); 2049 struct ib_sa_classport_cache *info = &sa_query->port->classport_info; 2050 2051 if (mad) { 2052 if (sa_query->flags & IB_SA_QUERY_OPA) { 2053 struct opa_class_port_info rec; 2054 2055 ib_unpack(opa_classport_info_rec_table, 2056 ARRAY_SIZE(opa_classport_info_rec_table), 2057 mad->data, &rec); 2058 2059 spin_lock_irqsave(&sa_query->port->classport_lock, 2060 flags); 2061 if (!status && !info->valid) { 2062 memcpy(&info->data.opa, &rec, 2063 sizeof(info->data.opa)); 2064 2065 info->valid = true; 2066 info->data.type = RDMA_CLASS_PORT_INFO_OPA; 2067 } 2068 spin_unlock_irqrestore(&sa_query->port->classport_lock, 2069 flags); 2070 2071 } else { 2072 struct ib_class_port_info rec; 2073 2074 ib_unpack(ib_classport_info_rec_table, 2075 ARRAY_SIZE(ib_classport_info_rec_table), 2076 mad->data, &rec); 2077 2078 spin_lock_irqsave(&sa_query->port->classport_lock, 2079 flags); 2080 if (!status && !info->valid) { 2081 memcpy(&info->data.ib, &rec, 2082 sizeof(info->data.ib)); 2083 2084 info->valid = true; 2085 info->data.type = RDMA_CLASS_PORT_INFO_IB; 2086 } 2087 spin_unlock_irqrestore(&sa_query->port->classport_lock, 2088 flags); 2089 } 2090 } 2091 query->callback(query->context); 2092 } 2093 2094 static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query) 2095 { 2096 kfree(container_of(sa_query, struct ib_sa_classport_info_query, 2097 sa_query)); 2098 } 2099 2100 static int ib_sa_classport_info_rec_query(struct ib_sa_port *port, 2101 int timeout_ms, 2102 void (*callback)(void *context), 2103 void *context, 2104 struct ib_sa_query **sa_query) 2105 { 2106 struct ib_mad_agent *agent; 2107 struct ib_sa_classport_info_query *query; 2108 struct ib_sa_mad *mad; 2109 gfp_t gfp_mask = GFP_KERNEL; 2110 int ret; 2111 2112 agent = port->agent; 2113 2114 query = kzalloc(sizeof(*query), gfp_mask); 2115 if (!query) 2116 return -ENOMEM; 2117 2118 query->sa_query.port = port; 2119 query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device, 2120 port->port_num) ? 2121 IB_SA_QUERY_OPA : 0; 2122 ret = alloc_mad(&query->sa_query, gfp_mask); 2123 if (ret) 2124 goto err_free; 2125 2126 query->callback = callback; 2127 query->context = context; 2128 2129 mad = query->sa_query.mad_buf->mad; 2130 init_mad(&query->sa_query, agent); 2131 2132 query->sa_query.callback = ib_sa_classport_info_rec_callback; 2133 query->sa_query.release = ib_sa_classport_info_rec_release; 2134 mad->mad_hdr.method = IB_MGMT_METHOD_GET; 2135 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO); 2136 mad->sa_hdr.comp_mask = 0; 2137 *sa_query = &query->sa_query; 2138 2139 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); 2140 if (ret < 0) 2141 goto err_free_mad; 2142 2143 return ret; 2144 2145 err_free_mad: 2146 *sa_query = NULL; 2147 free_mad(&query->sa_query); 2148 2149 err_free: 2150 kfree(query); 2151 return ret; 2152 } 2153 2154 static void update_ib_cpi(struct work_struct *work) 2155 { 2156 struct ib_sa_port *port = 2157 container_of(work, struct ib_sa_port, ib_cpi_work.work); 2158 struct ib_classport_info_context *cb_context; 2159 unsigned long flags; 2160 int ret; 2161 2162 /* If the classport info is valid, nothing 2163 * to do here. 2164 */ 2165 spin_lock_irqsave(&port->classport_lock, flags); 2166 if (port->classport_info.valid) { 2167 spin_unlock_irqrestore(&port->classport_lock, flags); 2168 return; 2169 } 2170 spin_unlock_irqrestore(&port->classport_lock, flags); 2171 2172 cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL); 2173 if (!cb_context) 2174 goto err_nomem; 2175 2176 init_completion(&cb_context->done); 2177 2178 ret = ib_sa_classport_info_rec_query(port, 3000, 2179 ib_classportinfo_cb, cb_context, 2180 &cb_context->sa_query); 2181 if (ret < 0) 2182 goto free_cb_err; 2183 wait_for_completion(&cb_context->done); 2184 free_cb_err: 2185 kfree(cb_context); 2186 spin_lock_irqsave(&port->classport_lock, flags); 2187 2188 /* If the classport info is still not valid, the query should have 2189 * failed for some reason. Retry issuing the query 2190 */ 2191 if (!port->classport_info.valid) { 2192 port->classport_info.retry_cnt++; 2193 if (port->classport_info.retry_cnt <= 2194 IB_SA_CPI_MAX_RETRY_CNT) { 2195 unsigned long delay = 2196 msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT); 2197 2198 queue_delayed_work(ib_wq, &port->ib_cpi_work, delay); 2199 } 2200 } 2201 spin_unlock_irqrestore(&port->classport_lock, flags); 2202 2203 err_nomem: 2204 return; 2205 } 2206 2207 static void send_handler(struct ib_mad_agent *agent, 2208 struct ib_mad_send_wc *mad_send_wc) 2209 { 2210 struct ib_sa_query *query = mad_send_wc->send_buf->context[0]; 2211 unsigned long flags; 2212 2213 if (query->callback) 2214 switch (mad_send_wc->status) { 2215 case IB_WC_SUCCESS: 2216 /* No callback -- already got recv */ 2217 break; 2218 case IB_WC_RESP_TIMEOUT_ERR: 2219 query->callback(query, -ETIMEDOUT, NULL); 2220 break; 2221 case IB_WC_WR_FLUSH_ERR: 2222 query->callback(query, -EINTR, NULL); 2223 break; 2224 default: 2225 query->callback(query, -EIO, NULL); 2226 break; 2227 } 2228 2229 spin_lock_irqsave(&idr_lock, flags); 2230 idr_remove(&query_idr, query->id); 2231 spin_unlock_irqrestore(&idr_lock, flags); 2232 2233 free_mad(query); 2234 if (query->client) 2235 ib_sa_client_put(query->client); 2236 query->release(query); 2237 } 2238 2239 static void recv_handler(struct ib_mad_agent *mad_agent, 2240 struct ib_mad_send_buf *send_buf, 2241 struct ib_mad_recv_wc *mad_recv_wc) 2242 { 2243 struct ib_sa_query *query; 2244 2245 if (!send_buf) 2246 return; 2247 2248 query = send_buf->context[0]; 2249 if (query->callback) { 2250 if (mad_recv_wc->wc->status == IB_WC_SUCCESS) 2251 query->callback(query, 2252 mad_recv_wc->recv_buf.mad->mad_hdr.status ? 2253 -EINVAL : 0, 2254 (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad); 2255 else 2256 query->callback(query, -EIO, NULL); 2257 } 2258 2259 ib_free_recv_mad(mad_recv_wc); 2260 } 2261 2262 static void update_sm_ah(struct work_struct *work) 2263 { 2264 struct ib_sa_port *port = 2265 container_of(work, struct ib_sa_port, update_task); 2266 struct ib_sa_sm_ah *new_ah; 2267 struct ib_port_attr port_attr; 2268 struct rdma_ah_attr ah_attr; 2269 2270 if (ib_query_port(port->agent->device, port->port_num, &port_attr)) { 2271 pr_warn("Couldn't query port\n"); 2272 return; 2273 } 2274 2275 new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL); 2276 if (!new_ah) 2277 return; 2278 2279 kref_init(&new_ah->ref); 2280 new_ah->src_path_mask = (1 << port_attr.lmc) - 1; 2281 2282 new_ah->pkey_index = 0; 2283 if (ib_find_pkey(port->agent->device, port->port_num, 2284 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index)) 2285 pr_err("Couldn't find index for default PKey\n"); 2286 2287 memset(&ah_attr, 0, sizeof(ah_attr)); 2288 ah_attr.type = rdma_ah_find_type(port->agent->device, 2289 port->port_num); 2290 rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid); 2291 rdma_ah_set_sl(&ah_attr, port_attr.sm_sl); 2292 rdma_ah_set_port_num(&ah_attr, port->port_num); 2293 if (port_attr.grh_required) { 2294 rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH); 2295 2296 rdma_ah_set_subnet_prefix(&ah_attr, 2297 cpu_to_be64(port_attr.subnet_prefix)); 2298 rdma_ah_set_interface_id(&ah_attr, 2299 cpu_to_be64(IB_SA_WELL_KNOWN_GUID)); 2300 } 2301 2302 new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr); 2303 if (IS_ERR(new_ah->ah)) { 2304 pr_warn("Couldn't create new SM AH\n"); 2305 kfree(new_ah); 2306 return; 2307 } 2308 2309 spin_lock_irq(&port->ah_lock); 2310 if (port->sm_ah) 2311 kref_put(&port->sm_ah->ref, free_sm_ah); 2312 port->sm_ah = new_ah; 2313 spin_unlock_irq(&port->ah_lock); 2314 } 2315 2316 static void ib_sa_event(struct ib_event_handler *handler, 2317 struct ib_event *event) 2318 { 2319 if (event->event == IB_EVENT_PORT_ERR || 2320 event->event == IB_EVENT_PORT_ACTIVE || 2321 event->event == IB_EVENT_LID_CHANGE || 2322 event->event == IB_EVENT_PKEY_CHANGE || 2323 event->event == IB_EVENT_SM_CHANGE || 2324 event->event == IB_EVENT_CLIENT_REREGISTER) { 2325 unsigned long flags; 2326 struct ib_sa_device *sa_dev = 2327 container_of(handler, typeof(*sa_dev), event_handler); 2328 u8 port_num = event->element.port_num - sa_dev->start_port; 2329 struct ib_sa_port *port = &sa_dev->port[port_num]; 2330 2331 if (!rdma_cap_ib_sa(handler->device, port->port_num)) 2332 return; 2333 2334 spin_lock_irqsave(&port->ah_lock, flags); 2335 if (port->sm_ah) 2336 kref_put(&port->sm_ah->ref, free_sm_ah); 2337 port->sm_ah = NULL; 2338 spin_unlock_irqrestore(&port->ah_lock, flags); 2339 2340 if (event->event == IB_EVENT_SM_CHANGE || 2341 event->event == IB_EVENT_CLIENT_REREGISTER || 2342 event->event == IB_EVENT_LID_CHANGE || 2343 event->event == IB_EVENT_PORT_ACTIVE) { 2344 unsigned long delay = 2345 msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT); 2346 2347 spin_lock_irqsave(&port->classport_lock, flags); 2348 port->classport_info.valid = false; 2349 port->classport_info.retry_cnt = 0; 2350 spin_unlock_irqrestore(&port->classport_lock, flags); 2351 queue_delayed_work(ib_wq, 2352 &port->ib_cpi_work, delay); 2353 } 2354 queue_work(ib_wq, &sa_dev->port[port_num].update_task); 2355 } 2356 } 2357 2358 static void ib_sa_add_one(struct ib_device *device) 2359 { 2360 struct ib_sa_device *sa_dev; 2361 int s, e, i; 2362 int count = 0; 2363 2364 s = rdma_start_port(device); 2365 e = rdma_end_port(device); 2366 2367 sa_dev = kzalloc(sizeof *sa_dev + 2368 (e - s + 1) * sizeof (struct ib_sa_port), 2369 GFP_KERNEL); 2370 if (!sa_dev) 2371 return; 2372 2373 sa_dev->start_port = s; 2374 sa_dev->end_port = e; 2375 2376 for (i = 0; i <= e - s; ++i) { 2377 spin_lock_init(&sa_dev->port[i].ah_lock); 2378 if (!rdma_cap_ib_sa(device, i + 1)) 2379 continue; 2380 2381 sa_dev->port[i].sm_ah = NULL; 2382 sa_dev->port[i].port_num = i + s; 2383 2384 spin_lock_init(&sa_dev->port[i].classport_lock); 2385 sa_dev->port[i].classport_info.valid = false; 2386 2387 sa_dev->port[i].agent = 2388 ib_register_mad_agent(device, i + s, IB_QPT_GSI, 2389 NULL, 0, send_handler, 2390 recv_handler, sa_dev, 0); 2391 if (IS_ERR(sa_dev->port[i].agent)) 2392 goto err; 2393 2394 INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah); 2395 INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work, 2396 update_ib_cpi); 2397 2398 count++; 2399 } 2400 2401 if (!count) 2402 goto free; 2403 2404 ib_set_client_data(device, &sa_client, sa_dev); 2405 2406 /* 2407 * We register our event handler after everything is set up, 2408 * and then update our cached info after the event handler is 2409 * registered to avoid any problems if a port changes state 2410 * during our initialization. 2411 */ 2412 2413 INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event); 2414 if (ib_register_event_handler(&sa_dev->event_handler)) 2415 goto err; 2416 2417 for (i = 0; i <= e - s; ++i) { 2418 if (rdma_cap_ib_sa(device, i + 1)) 2419 update_sm_ah(&sa_dev->port[i].update_task); 2420 } 2421 2422 return; 2423 2424 err: 2425 while (--i >= 0) { 2426 if (rdma_cap_ib_sa(device, i + 1)) 2427 ib_unregister_mad_agent(sa_dev->port[i].agent); 2428 } 2429 free: 2430 kfree(sa_dev); 2431 return; 2432 } 2433 2434 static void ib_sa_remove_one(struct ib_device *device, void *client_data) 2435 { 2436 struct ib_sa_device *sa_dev = client_data; 2437 int i; 2438 2439 if (!sa_dev) 2440 return; 2441 2442 ib_unregister_event_handler(&sa_dev->event_handler); 2443 flush_workqueue(ib_wq); 2444 2445 for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) { 2446 if (rdma_cap_ib_sa(device, i + 1)) { 2447 cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work); 2448 ib_unregister_mad_agent(sa_dev->port[i].agent); 2449 if (sa_dev->port[i].sm_ah) 2450 kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah); 2451 } 2452 2453 } 2454 2455 kfree(sa_dev); 2456 } 2457 2458 int ib_sa_init(void) 2459 { 2460 int ret; 2461 2462 get_random_bytes(&tid, sizeof tid); 2463 2464 atomic_set(&ib_nl_sa_request_seq, 0); 2465 2466 ret = ib_register_client(&sa_client); 2467 if (ret) { 2468 pr_err("Couldn't register ib_sa client\n"); 2469 goto err1; 2470 } 2471 2472 ret = mcast_init(); 2473 if (ret) { 2474 pr_err("Couldn't initialize multicast handling\n"); 2475 goto err2; 2476 } 2477 2478 ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM); 2479 if (!ib_nl_wq) { 2480 ret = -ENOMEM; 2481 goto err3; 2482 } 2483 2484 INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout); 2485 2486 return 0; 2487 2488 err3: 2489 mcast_cleanup(); 2490 err2: 2491 ib_unregister_client(&sa_client); 2492 err1: 2493 return ret; 2494 } 2495 2496 void ib_sa_cleanup(void) 2497 { 2498 cancel_delayed_work(&ib_nl_timed_work); 2499 flush_workqueue(ib_nl_wq); 2500 destroy_workqueue(ib_nl_wq); 2501 mcast_cleanup(); 2502 ib_unregister_client(&sa_client); 2503 idr_destroy(&query_idr); 2504 } 2505