/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include <rdma/opa_addr.h>
#include <rdma/rdma_cm.h>
#include "sa.h"
#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
#define IB_SA_CPI_MAX_RETRY_CNT			3
#define IB_SA_CPI_RETRY_WAIT			1000 /* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah *ah;
	struct kref   ref;
	u16	      pkey_index;
	u8	      src_path_mask;
};

enum rdma_class_port_info_type {
	RDMA_CLASS_PORT_INFO_IB,
	RDMA_CLASS_PORT_INFO_OPA
};

struct rdma_class_port_info {
	enum rdma_class_port_info_type type;
	union {
		struct ib_class_port_info ib;
		struct opa_class_port_info opa;
	};
};

struct ib_sa_classport_cache {
	bool valid;
	int retry_cnt;
	struct rdma_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	struct ib_sa_classport_cache classport_info;
	struct delayed_work ib_cpi_work;
	spinlock_t	     classport_lock; /* protects class port info set */
	spinlock_t	     ah_lock;
	u32		     port_num;
};

struct ib_sa_device {
	int			start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port	port[];
};
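/*
 * Common state for one outstanding SA query. Exactly one of ->callback
 * (single-MAD responses) or ->rmpp_callback (RMPP, multi-segment
 * responses) is set, and ->release frees the type-specific wrapper once
 * the query is done. The list/seq/timeout fields are only meaningful
 * while the query sits on the netlink local-service request list.
 */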
struct ib_sa_query {
	void (*callback)(struct ib_sa_query *sa_query, int status,
			 struct ib_sa_mad *mad);
	void (*rmpp_callback)(struct ib_sa_query *sa_query, int status,
			      struct ib_mad_recv_wc *mad);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list; /* Local svc request list */
	u32			seq; /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
#define IB_SA_QUERY_OPA			0x00000004

struct ib_sa_path_query {
	void (*callback)(int status, struct sa_path_rec *rec,
			 unsigned int num_paths, void *context);
	void *context;
	struct ib_sa_query sa_query;
	struct sa_path_rec *conv_pr;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_service_query {
	void (*callback)(int status, struct sa_service_rec *rec,
			 unsigned int num_services, void *context);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD] = {.type = NLA_BINARY,
				     .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT] = {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID] = {.type = NLA_U64},
	[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
			      .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID] = {.type = NLA_BINARY,
			      .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS] = {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY] = {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS] = {.type = NLA_U16},
};

static int ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct sa_path_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct sa_path_rec, field),	\
	.field_name          = "sa_path_rec:" #field
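/*
 * The tables below drive ib_pack()/ib_unpack(): each entry maps a field
 * of the in-memory struct (byte offset and size) to its position in the
 * big-endian wire image (offset_words/offset_bits/size_bits). RESERVED
 * entries only reserve wire bits and have no struct counterpart.
 */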
static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(ib.dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(ib.slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(ib.raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};

#define OPA_PATH_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct sa_path_rec, field), \
	.struct_size_bytes   = \
		sizeof_field(struct sa_path_rec, field), \
	.field_name          = "sa_path_rec:" #field

static const struct ib_field opa_path_rec_table[] = {
	{ OPA_PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ OPA_PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_PATH_REC_FIELD(opa.dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_PATH_REC_FIELD(opa.slid),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_PATH_REC_FIELD(opa.raw_traffic),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ OPA_PATH_REC_FIELD(flow_label),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ OPA_PATH_REC_FIELD(hop_limit),
	  .offset_words = 12,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(traffic_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(reversible),
	  .offset_words = 13,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(numb_path),
	  .offset_words = 13,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ OPA_PATH_REC_FIELD(pkey),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_PATH_REC_FIELD(opa.l2_8B),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_10B),
	  .offset_words = 14,
	  .offset_bits  = 1,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_9B),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_16B),
	  .offset_words = 14,
	  .offset_bits  = 3,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 4,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_type),
	  .offset_words = 14,
	  .offset_bits  = 6,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_priority),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 3 },
	{ OPA_PATH_REC_FIELD(sl),
	  .offset_words = 14,
	  .offset_bits  = 19,
	  .size_bits    = 5 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(mtu_selector),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(mtu),
	  .offset_words = 15,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(rate_selector),
	  .offset_words = 15,
	  .offset_bits  = 8,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(rate),
	  .offset_words = 15,
	  .offset_bits  = 10,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 15,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(packet_life_time),
	  .offset_words = 15,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(preference),
	  .offset_words = 15,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_sa_mcmember_rec, field),	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_class_port_info, field),	\
	.field_name          = "ib_class_port_info:" #field

static const struct ib_field ib_classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 7,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 15,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
};

#define OPA_CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct opa_class_port_info, field), \
	.struct_size_bytes = \
		sizeof_field(struct opa_class_port_info, field), \
	.field_name = "opa_class_port_info:" #field

static const struct ib_field opa_classport_info_rec_table[] = {
	{ OPA_CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 18,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 18,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
	  .offset_words = 19,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 19,
	  .offset_bits  = 8,
	  .size_bits    = 24 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_sa_guidinfo_rec, field),	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 512 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct sa_service_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct sa_service_rec, field),	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ RESERVED,
	  .offset_words = 6,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 512 },
	{ SERVICE_REC_FIELD(data_8),
	  .offset_words = 28,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(data_16),
	  .offset_words = 32,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(data_32),
	  .offset_words = 36,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(data_64),
	  .offset_words = 40,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
};

#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	strscpy_pad(header->device_name,
		    dev_name(&query->port->agent->device->dev),
		    LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_ALL;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}
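/*
 * Local service resolution: the path query is first multicast to
 * user-space listeners on the RDMA_NL_GROUP_LS netlink group. If no
 * response arrives within sa_local_svc_timeout_ms, the delayed work in
 * ib_nl_request_timeout() falls back to sending the MAD to the SA
 * directly.
 */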
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	struct ib_sa_mad *mad;
	int len;
	unsigned long flags;
	unsigned long delay;
	gfp_t gfp_flag;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
		GFP_NOWAIT;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);

	if (ret)
		goto out;

	/* Put the request on the list. */
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);

out:
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct sa_path_rec recs[RDMA_PRIMARY_PATH_MAX_REC_NUM];
	struct ib_sa_path_query *path_query;
	struct ib_path_rec_data *rec_data;
	struct ib_mad_send_wc mad_send_wc;
	const struct nlattr *head, *curr;
	struct ib_sa_mad *mad = NULL;
	int len, rem, status = -EIO;
	unsigned int num_prs = 0;
	u32 mask = 0;

	if (!query->callback)
		goto out;

	path_query = container_of(query, struct ib_sa_path_query, sa_query);
	mad = query->mad_buf->mad;

	head = (const struct nlattr *) nlmsg_data(nlh);
	len = nlmsg_len(nlh);
	switch (query->path_use) {
	case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
		mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
		break;

	case LS_RESOLVE_PATH_USE_ALL:
		mask = IB_PATH_PRIMARY;
		break;
	case LS_RESOLVE_PATH_USE_GMP:
	default:
		mask = IB_PATH_PRIMARY | IB_PATH_GMP |
			IB_PATH_BIDIRECTIONAL;
		break;
	}

	nla_for_each_attr(curr, head, len, rem) {
		if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
			continue;

		rec_data = nla_data(curr);
		if ((rec_data->flags & mask) != mask)
			continue;

		if ((query->flags & IB_SA_QUERY_OPA) ||
		    path_query->conv_pr) {
			mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
			memcpy(mad->data, rec_data->path_rec,
			       sizeof(rec_data->path_rec));
			query->callback(query, 0, mad);
			goto out;
		}

		status = 0;
		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  rec_data->path_rec, &recs[num_prs]);
		recs[num_prs].flags = rec_data->flags;
		recs[num_prs].rec_type = SA_PATH_REC_TYPE_IB;
		sa_path_set_dmac_zero(&recs[num_prs]);

		num_prs++;
		if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
			break;
	}

	if (!status) {
		mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
		path_query->callback(status, recs, num_prs,
				     path_query->context);
	} else
		query->callback(query, status, mad);

out:
	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
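/*
 * Netlink handler: lets the user-space resolver adjust
 * sa_local_svc_timeout_ms (clamped to the MIN/MAX bounds defined above)
 * and shifts the deadlines of already-queued requests by the same
 * delta.
 */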
int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_policy, NULL);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	spin_lock_irqsave(&ib_nl_request_lock, flags);

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
	}

	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

settimeout_out:
	return 0;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_policy, NULL);
	if (ret)
		return 0;

	return 1;
}

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	unsigned long flags;
	struct ib_sa_query *query = NULL, *iter;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(iter, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == iter->seq) {
			if (!ib_sa_query_cancelled(iter)) {
				list_del(&iter->list);
				query = iter;
			}
			break;
		}
	}

	if (!query) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return 0;
}
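/*
 * The SM address handle is shared by all queries on a port and is
 * reference-counted: each in-flight query takes a kref (in alloc_mad()
 * below), and free_sm_ah() runs when the last reference drops.
 */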
static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	rdma_destroy_ah(sm_ah->ah, 0);
	kfree(sm_ah);
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id: ID of query to cancel
 * @query: query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_send_buf *mad_buf;

	xa_lock_irqsave(&queries, flags);
	if (xa_load(&queries, id) != query) {
		xa_unlock_irqrestore(&queries, flags);
		return;
	}
	mad_buf = query->mad_buf;
	xa_unlock_irqrestore(&queries, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

static u8 get_src_path_mask(struct ib_device *device, u32 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port   *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num,
				   struct sa_path_rec *rec,
				   struct rdma_ah_attr *ah_attr,
				   const struct ib_gid_attr *gid_attr)
{
	enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);

	if (!gid_attr) {
		gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
						 port_num, NULL);
		if (IS_ERR(gid_attr))
			return PTR_ERR(gid_attr);
	} else
		rdma_hold_gid_attr(gid_attr);

	rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
				be32_to_cpu(rec->flow_label),
				rec->hop_limit, rec->traffic_class,
				gid_attr);
	return 0;
}

/**
 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
 *   an SA path record.
 * @device: Device with which the address handle attributes are associated.
 * @port_num: Port on the specified device.
 * @rec: path record entry to use for ah attributes initialization.
 * @ah_attr: address handle attributes to initialize from the path record.
 * @gid_attr: SGID attribute to consider during initialization.
 *
 * When ib_init_ah_attr_from_path() returns success,
 * (a) for the IB link layer it optionally contains a reference to the SGID
 *     attribute when a GRH is present,
 * (b) for the RoCE link layer it contains a reference to the SGID attribute.
 * The caller must invoke rdma_destroy_ah_attr() to release references to SGID
 * attributes which are initialized using ib_init_ah_attr_from_path().
 */
int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
			      struct sa_path_rec *rec,
			      struct rdma_ah_attr *ah_attr,
			      const struct ib_gid_attr *gid_attr)
{
	int ret = 0;

	memset(ah_attr, 0, sizeof(*ah_attr));
	ah_attr->type = rdma_ah_find_type(device, port_num);
	rdma_ah_set_sl(ah_attr, rec->sl);
	rdma_ah_set_port_num(ah_attr, port_num);
	rdma_ah_set_static_rate(ah_attr, rec->rate);

	if (sa_path_is_roce(rec)) {
		ret = roce_resolve_route_from_path(rec, gid_attr);
		if (ret)
			return ret;

		memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
	} else {
		rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
		if (sa_path_is_opa(rec) &&
		    rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
			rdma_ah_set_make_grd(ah_attr, true);

		rdma_ah_set_path_bits(ah_attr,
				      be32_to_cpu(sa_path_get_slid(rec)) &
				      get_src_path_mask(device, port_num));
	}

	if (rec->hop_limit > 0 || sa_path_is_roce(rec))
		ret = init_ah_attr_grh_fields(device, port_num,
					      rec, ah_attr, gid_attr);
	return ret;
}
EXPORT_SYMBOL(ib_init_ah_attr_from_path);
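/*
 * Take a reference on the port's current SM address handle and allocate
 * the send buffer for a query. Returns -EAGAIN when the port has no
 * usable SM address handle (none set yet, or its DLID is not a valid
 * unicast LID), so a caller may retry once an SM AH is available.
 */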
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct rdma_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	/*
	 * Always check if sm_ah has valid dlid assigned,
	 * before querying for class port info
	 */
	if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
	    !rdma_is_valid_unicast_lid(&ah_attr)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -EAGAIN;
	}
	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    ((query->flags & IB_SA_QUERY_OPA) ?
					     OPA_MGMT_BASE_VERSION :
					     IB_MGMT_BASE_VERSION));
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
	struct ib_sa_mad *mad = query->mad_buf->mad;
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	if (query->flags & IB_SA_QUERY_OPA) {
		mad->mad_hdr.base_version  = OPA_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
	} else {
		mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
	}
	mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
		    gfp_t gfp_mask)
{
	unsigned long flags;
	int ret, id;
	const int nmbr_sa_query_retries = 10;

	xa_lock_irqsave(&queries, flags);
	ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
	xa_unlock_irqrestore(&queries, flags);
	if (ret < 0)
		return ret;

	query->mad_buf->timeout_ms = timeout_ms / nmbr_sa_query_retries;
	query->mad_buf->retries = nmbr_sa_query_retries;
	if (!query->mad_buf->timeout_ms) {
		/* Special case, very small timeout_ms */
		query->mad_buf->timeout_ms = 1;
		query->mad_buf->retries = timeout_ms;
	}
	query->mad_buf->context[0] = query;
	query->id = id;

	if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
	    (!(query->flags & IB_SA_QUERY_OPA))) {
		if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		xa_lock_irqsave(&queries, flags);
		__xa_erase(&queries, id);
		xa_unlock_irqrestore(&queries, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}
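/*
 * Note on the retry math in send_mad(): the caller's timeout_ms budget
 * is split evenly across up to 10 MAD-layer retries; for very small
 * budgets each retry gets the 1 ms minimum and the retry count carries
 * the budget instead. The returned id doubles as the handle that
 * ib_sa_cancel_query() uses to find the query in the xarray.
 */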
void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

void ib_sa_pack_service(struct sa_service_rec *rec, void *attribute)
{
	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table), rec,
		attribute);
}
EXPORT_SYMBOL(ib_sa_pack_service);

void ib_sa_unpack_service(void *attribute, struct sa_service_rec *rec)
{
	ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table), attribute,
		  rec);
}
EXPORT_SYMBOL(ib_sa_unpack_service);

static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
					 struct ib_sa_device *sa_dev,
					 u32 port_num)
{
	struct ib_sa_port *port;
	unsigned long flags;
	bool ret = false;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->classport_lock, flags);
	if (!port->classport_info.valid)
		goto ret;

	if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
		ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
			OPA_CLASS_PORT_INFO_PR_SUPPORT;
ret:
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}

enum opa_pr_supported {
	PR_NOT_SUPPORTED,
	PR_OPA_SUPPORTED,
	PR_IB_SUPPORTED
};
/*
 * opa_pr_query_possible - Check if current PR query can be an OPA query.
 *
 * Returns PR_NOT_SUPPORTED if a path record query is not
 * possible, PR_OPA_SUPPORTED if an OPA path record query
 * is possible and PR_IB_SUPPORTED if an IB path record
 * query is possible.
 */
static int opa_pr_query_possible(struct ib_sa_client *client,
				 struct ib_sa_device *sa_dev,
				 struct ib_device *device, u32 port_num)
{
	struct ib_port_attr port_attr;

	if (ib_query_port(device, port_num, &port_attr))
		return PR_NOT_SUPPORTED;

	if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num))
		return PR_OPA_SUPPORTED;

	if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		return PR_NOT_SUPPORTED;
	else
		return PR_IB_SUPPORTED;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status, struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);
	struct sa_path_rec rec = {};

	if (!mad) {
		query->callback(status, NULL, 0, query->context);
		return;
	}

	if (sa_query->flags & IB_SA_QUERY_OPA) {
		ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			  mad->data, &rec);
		rec.rec_type = SA_PATH_REC_TYPE_OPA;
		query->callback(status, &rec, 1, query->context);
		return;
	}

	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
		  mad->data, &rec);
	rec.rec_type = SA_PATH_REC_TYPE_IB;
	sa_path_set_dmac_zero(&rec);

	if (query->conv_pr) {
		struct sa_path_rec opa;

		memset(&opa, 0, sizeof(struct sa_path_rec));
		sa_convert_path_ib_to_opa(&opa, &rec);
		query->callback(status, &opa, 1, query->context);
	} else {
		query->callback(status, &rec, 1, query->context);
	}
}

#define IB_SA_DATA_OFFS		56
#define IB_SERVICE_REC_SZ	176
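/*
 * Reassemble ServiceRecords from an RMPP response. A 176-byte record
 * may straddle the data areas of two RMPP segments, so bytes are staged
 * in a bounce buffer and a record is unpacked each time
 * IB_SERVICE_REC_SZ bytes have accumulated.
 */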
static void ib_unpack_service_rmpp(struct sa_service_rec *rec,
				   struct ib_mad_recv_wc *mad_wc,
				   int num_services)
{
	unsigned int cp_sz, data_i, data_size, rec_i = 0, buf_i = 0;
	struct ib_mad_recv_buf *mad_buf;
	u8 buf[IB_SERVICE_REC_SZ];
	u8 *data;

	/*
	 * sizeof() does not evaluate its operand, so the not yet
	 * initialized mad_buf is only used here to compute the size of
	 * the MAD data area.
	 */
	data_size = sizeof(((struct ib_sa_mad *) mad_buf->mad)->data);

	list_for_each_entry(mad_buf, &mad_wc->rmpp_list, list) {
		data = ((struct ib_sa_mad *) mad_buf->mad)->data;
		data_i = 0;
		while (data_i < data_size && rec_i < num_services) {
			cp_sz = min(IB_SERVICE_REC_SZ - buf_i,
				    data_size - data_i);
			memcpy(buf + buf_i, data + data_i, cp_sz);
			data_i += cp_sz;
			buf_i += cp_sz;
			if (buf_i == IB_SERVICE_REC_SZ) {
				ib_sa_unpack_service(buf, rec + rec_i);
				buf_i = 0;
				rec_i++;
			}
		}
	}
}

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query, int status,
				       struct ib_mad_recv_wc *mad_wc)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);
	struct sa_service_rec *rec;
	int num_services;

	if (!mad_wc || !mad_wc->recv_buf.mad) {
		query->callback(status, NULL, 0, query->context);
		return;
	}

	num_services = (mad_wc->mad_len - IB_SA_DATA_OFFS) / IB_SERVICE_REC_SZ;
	if (!num_services) {
		query->callback(-ENODATA, NULL, 0, query->context);
		return;
	}

	rec = kmalloc_array(num_services, sizeof(*rec), GFP_KERNEL);
	if (!rec) {
		query->callback(-ENOMEM, NULL, 0, query->context);
		return;
	}

	ib_unpack_service_rmpp(rec, mad_wc, num_services);
	query->callback(status, rec, num_services, query->context);
	kfree(rec);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	kfree(query->conv_pr);
	kfree(query);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	kfree(query);
}
/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client: SA client
 * @device: device to send query on
 * @port_num: port number to send query on
 * @rec: Path Record to send in query
 * @comp_mask: component mask to send in query
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when query completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u32 port_num,
		       struct sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       unsigned long timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct sa_path_rec *resp,
					unsigned int num_paths, void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	enum opa_pr_supported status;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
	    (rec->rec_type != SA_PATH_REC_TYPE_OPA))
		return -EINVAL;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
		status = opa_pr_query_possible(client, sa_dev, device, port_num);
		if (status == PR_NOT_SUPPORTED) {
			ret = -EINVAL;
			goto err1;
		} else if (status == PR_OPA_SUPPORTED) {
			query->sa_query.flags |= IB_SA_QUERY_OPA;
		} else {
			query->conv_pr =
				kmalloc(sizeof(*query->conv_pr), gfp_mask);
			if (!query->conv_pr) {
				ret = -ENOMEM;
				goto err1;
			}
		}
	}

	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err2;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	if (query->sa_query.flags & IB_SA_QUERY_OPA) {
		ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			rec, mad->data);
	} else if (query->conv_pr) {
		sa_convert_path_opa_to_ib(query->conv_pr, rec);
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			query->conv_pr, mad->data);
	} else {
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			rec, mad->data);
	}

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
						query->conv_pr : rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err3;

	return ret;

err3:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err2:
	kfree(query->conv_pr);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
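/*
 * A minimal caller sketch (the callback and variable names are
 * illustrative, not part of this file), assuming the client was set up
 * with ib_sa_register_client():
 *
 *	static void my_path_cb(int status, struct sa_path_rec *resp,
 *			       unsigned int num_paths, void *context)
 *	{
 *		if (!status)
 *			;	// use resp[0] .. resp[num_paths - 1]
 *	}
 *
 *	id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				1000, GFP_KERNEL, my_path_cb, my_ctx,
 *				&query);
 *	if (id >= 0 && need_to_abort)
 *		ib_sa_cancel_query(id, query);
 */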
/**
 * ib_sa_service_rec_get - Start a Service get query
 * @client: SA client
 * @device: device to send query on
 * @port_num: port number to send query on
 * @rec: Service Record to send in query
 * @comp_mask: component mask to send in query
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when query completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: query context, used to cancel query
 *
 * Send a Service Record Get query to the SA to look up service records.
 * The callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_get(struct ib_sa_client *client,
			  struct ib_device *device, u32 port_num,
			  struct sa_service_rec *rec,
			  ib_sa_comp_mask comp_mask,
			  unsigned long timeout_ms, gfp_t gfp_mask,
			  void (*callback)(int status,
					   struct sa_service_rec *resp,
					   unsigned int num_services,
					   void *context),
			  void *context, struct ib_sa_query **sa_query)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_service_query *query;
	struct ib_mad_agent *agent;
	struct ib_sa_port *port;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;

	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.rmpp_callback = callback ? ib_sa_service_rec_callback :
						   NULL;
	query->sa_query.release = ib_sa_service_rec_release;
	mad->mad_hdr.method = IB_MGMT_METHOD_GET_TABLE;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_sa_pack_service(rec, mad->data);

	*sa_query = &query->sa_query;
	query->sa_query.mad_buf->context[1] = rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_get);
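/*
 * Unlike the single-record queries in this file, the service query uses
 * IB_MGMT_METHOD_GET_TABLE and completes through ->rmpp_callback, since
 * the SA may return multiple records spread over an RMPP transfer.
 */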

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status, struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u32 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     unsigned long timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
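
/*
 * Illustrative sketch only: ib_sa_mcmember_rec_query() takes the SA
 * method directly -- IB_MGMT_METHOD_GET fetches an existing group's
 * MCMemberRecord, IB_MGMT_METHOD_SET requests a join and
 * IB_SA_METHOD_DELETE a leave. The in-kernel multicast code (see
 * mcast_init() below) drives joins and leaves through this call; a GET
 * might look like this (hypothetical names):
 *
 *	static void my_mc_cb(int status, struct ib_sa_mcmember_rec *resp,
 *			     void *context)
 *	{
 *		...
 *	}
 *
 *	id = ib_sa_mcmember_rec_query(&my_sa_client, device, port_num,
 *				      IB_MGMT_METHOD_GET, &mc_rec, comp_mask,
 *				      1000, GFP_KERNEL, my_mc_cb, NULL,
 *				      &query);
 */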

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status, struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u32 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      unsigned long timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
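
/*
 * Illustrative sketch only: ib_sa_guid_info_rec_query() accepts
 * IB_MGMT_METHOD_GET, IB_MGMT_METHOD_SET or IB_SA_METHOD_DELETE;
 * anything else is rejected with -EINVAL above. A SET (e.g. publishing
 * updated alias GUIDs for a virtualized port) could be issued as
 * follows (hypothetical names):
 *
 *	id = ib_sa_guid_info_rec_query(&my_sa_client, device, port_num,
 *				       &guid_rec, comp_mask,
 *				       IB_MGMT_METHOD_SET, 1000, GFP_KERNEL,
 *				       my_guid_cb, NULL, &query);
 */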

struct ib_classport_info_context {
	struct completion	done;
	struct ib_sa_query	*sa_query;
};

static void ib_classportinfo_cb(void *context)
{
	struct ib_classport_info_context *cb_ctx = context;

	complete(&cb_ctx->done);
}

static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status, struct ib_sa_mad *mad)
{
	unsigned long flags;
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;

	if (mad) {
		if (sa_query->flags & IB_SA_QUERY_OPA) {
			struct opa_class_port_info rec;

			ib_unpack(opa_classport_info_rec_table,
				  ARRAY_SIZE(opa_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.opa, &rec,
				       sizeof(info->data.opa));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);

		} else {
			struct ib_class_port_info rec;

			ib_unpack(ib_classport_info_rec_table,
				  ARRAY_SIZE(ib_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.ib, &rec,
				       sizeof(info->data.ib));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_IB;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);
		}
	}
	query->callback(query->context);
}

static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}

static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
					  unsigned long timeout_ms,
					  void (*callback)(void *context),
					  void *context,
					  struct ib_sa_query **sa_query)
{
	struct ib_mad_agent *agent;
	struct ib_sa_classport_info_query *query;
	struct ib_sa_mad *mad;
	gfp_t gfp_mask = GFP_KERNEL;
	int ret;

	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
						 port->port_num) ?
				 IB_SA_QUERY_OPA : 0;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err_free;

	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = ib_sa_classport_info_rec_callback;
	query->sa_query.release = ib_sa_classport_info_rec_release;
	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err_free_mad;

	return ret;

err_free_mad:
	*sa_query = NULL;
	free_mad(&query->sa_query);

err_free:
	kfree(query);
	return ret;
}

static void update_ib_cpi(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, ib_cpi_work.work);
	struct ib_classport_info_context *cb_context;
	unsigned long flags;
	int ret;

	/* If the classport info is valid, nothing
	 * to do here.
	 */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid) {
		spin_unlock_irqrestore(&port->classport_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
	if (!cb_context)
		goto err_nomem;

	init_completion(&cb_context->done);

	ret = ib_sa_classport_info_rec_query(port, 3000,
					     ib_classportinfo_cb, cb_context,
					     &cb_context->sa_query);
	if (ret < 0)
		goto free_cb_err;
	wait_for_completion(&cb_context->done);
free_cb_err:
	kfree(cb_context);
	spin_lock_irqsave(&port->classport_lock, flags);

	/* If the classport info is still not valid, the query should have
	 * failed for some reason. Retry issuing the query.
	 */
	if (!port->classport_info.valid) {
		port->classport_info.retry_cnt++;
		if (port->classport_info.retry_cnt <=
		    IB_SA_CPI_MAX_RETRY_CNT) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
		}
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

err_nomem:
	return;
}
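
/*
 * Timing note: each update_ib_cpi() pass waits up to 3000 ms for the
 * ClassPortInfo response and, while the cache remains invalid,
 * re-queues itself at IB_SA_CPI_RETRY_WAIT ms intervals up to
 * IB_SA_CPI_MAX_RETRY_CNT times, so an unresponsive SA is polled only
 * a bounded number of times per invalidation.
 */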

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;
	int status = 0;

	if (query->callback || query->rmpp_callback) {
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			status = -ETIMEDOUT;
			break;
		case IB_WC_WR_FLUSH_ERR:
			status = -EINTR;
			break;
		default:
			status = -EIO;
			break;
		}

		if (status)
			query->callback ?
				query->callback(query, status, NULL) :
				query->rmpp_callback(query, status, NULL);
	}

	xa_lock_irqsave(&queries, flags);
	__xa_erase(&queries, query->id);
	xa_unlock_irqrestore(&queries, flags);

	free_mad(query);
	if (query->client)
		ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;
	struct ib_mad *mad;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	mad = mad_recv_wc->recv_buf.mad;

	if (query->rmpp_callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->rmpp_callback(query, mad->mad_hdr.status ?
					     -EINVAL : 0, mad_recv_wc);
		else
			query->rmpp_callback(query, -EIO, NULL);
	} else if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query, mad->mad_hdr.status ?
					-EINVAL : 0, (struct ib_sa_mad *)mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}
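
/*
 * Completion paths: recv_handler() only delivers the response payload
 * (a single MAD via ->callback, an RMPP chain via ->rmpp_callback).
 * Final teardown of a query always happens in send_handler(), which
 * erases it from the XArray, frees its MAD and calls ->release() --
 * whether the send completed normally or with a timeout (-ETIMEDOUT),
 * flush/cancel (-EINTR) or other error (-EIO).
 */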

static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct rdma_ah_attr ah_attr;
	bool grh_required;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.type = rdma_ah_find_type(port->agent->device,
					 port->port_num);
	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
	rdma_ah_set_port_num(&ah_attr, port->port_num);

	grh_required = rdma_is_grh_required(port->agent->device,
					    port->port_num);

	/*
	 * The OPA sm_lid of 0xFFFF needs special handling so that it can be
	 * differentiated from a permissive LID of 0xFFFF. We set the
	 * grh_required flag here so the SA can program the DGID in the
	 * address handle appropriately.
	 */
	if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
	    (grh_required ||
	     port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
		rdma_ah_set_make_grd(&ah_attr, true);

	if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
		rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
		rdma_ah_set_subnet_prefix(&ah_attr,
					  cpu_to_be64(port_attr.subnet_prefix));
		rdma_ah_set_interface_id(&ah_attr,
					 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
	}

	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr,
				    RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		u32 port_num = event->element.port_num - sa_dev->start_port;
		struct ib_sa_port *port = &sa_dev->port[port_num];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE ||
		    event->event == IB_EVENT_PORT_ACTIVE) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			port->classport_info.retry_cnt = 0;
			spin_unlock_irqrestore(&port->classport_lock, flags);
			queue_delayed_work(ib_wq,
					   &port->ib_cpi_work, delay);
		}
		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
	}
}
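
/*
 * Device add/remove: ib_sa_add_one() below allocates one ib_sa_device
 * per IB device and, for every SA-capable port, registers a GSI MAD
 * agent and initializes the SM address handle and the ClassPortInfo
 * cache that ib_sa_event() keeps current.
 */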
static int ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;
	int ret;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(struct_size(sa_dev, port,
				     size_add(size_sub(e, s), 1)),
			 GFP_KERNEL);
	if (!sa_dev)
		return -ENOMEM;

	sa_dev->start_port = s;
	sa_dev->end_port = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, IB_MGMT_RMPP_VERSION,
					      send_handler, recv_handler,
					      sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent)) {
			ret = PTR_ERR(sa_dev->port[i].agent);
			goto err;
		}

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
				  update_ib_cpi);

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	ib_register_event_handler(&sa_dev->event_handler);

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return 0;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return ret;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	ib_unregister_event_handler(&sa_dev->event_handler);
	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	WARN_ON(!xa_empty(&queries));
}