/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include <rdma/opa_addr.h>
#include <rdma/rdma_cm.h>
#include "sa.h"
#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
#define IB_SA_CPI_MAX_RETRY_CNT			3
#define IB_SA_CPI_RETRY_WAIT			1000 /* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah *ah;
	struct kref   ref;
	u16	      pkey_index;
	u8	      src_path_mask;
};

enum rdma_class_port_info_type {
	RDMA_CLASS_PORT_INFO_IB,
	RDMA_CLASS_PORT_INFO_OPA
};

struct rdma_class_port_info {
	enum rdma_class_port_info_type type;
	union {
		struct ib_class_port_info ib;
		struct opa_class_port_info opa;
	};
};

struct ib_sa_classport_cache {
	bool valid;
	int retry_cnt;
	struct rdma_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	struct ib_sa_classport_cache classport_info;
	struct delayed_work ib_cpi_work;
	spinlock_t	     classport_lock; /* protects class port info set */
	spinlock_t	     ah_lock;
	u32		     port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *sa_query, int status,
			 int num_prs, struct ib_sa_mad *mad);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list;    /* Local svc request list */
	u32			seq;     /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the pathrecord be used */

	/* A separate buffer to save pathrecords of a response, as in cases
	 * like IB/netlink, multiple pathrecords are supported, so that
	 * mad->data is not large enough to hold them
	 */
	void			*resp_pr_data;
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
#define IB_SA_QUERY_OPA			0x00000004

struct ib_sa_path_query {
	void (*callback)(int status, struct sa_path_rec *rec,
			 int num_paths, void *context);
	void *context;
	struct ib_sa_query sa_query;
	struct sa_path_rec *conv_pr;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};


static int ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct sa_path_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct sa_path_rec, field), \
	.field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ PATH_REC_FIELD(ib.dlid),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ PATH_REC_FIELD(ib.slid),
	  .offset_words = 10,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ PATH_REC_FIELD(ib.raw_traffic),
	  .offset_words = 11,
	  .offset_bits = 0,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits = 1,
	  .size_bits = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits = 4,
	  .size_bits = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits = 8,
	  .size_bits = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits = 9,
	  .size_bits = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits = 0,
	  .size_bits = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits = 12,
	  .size_bits = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits = 24,
	  .size_bits = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits = 26,
	  .size_bits = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits = 0,
	  .size_bits = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits = 2,
	  .size_bits = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits = 8,
	  .size_bits = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits = 16,
	  .size_bits = 48 },
};

#define OPA_PATH_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct sa_path_rec, field), \
	.struct_size_bytes   = \
		sizeof_field(struct sa_path_rec, field), \
	.field_name          = "sa_path_rec:" #field

static const struct ib_field opa_path_rec_table[] = {
	{ OPA_PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 64 },
	{ OPA_PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ OPA_PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ OPA_PATH_REC_FIELD(opa.dlid),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_PATH_REC_FIELD(opa.slid),
	  .offset_words = 11,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_PATH_REC_FIELD(opa.raw_traffic),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits = 1,
	  .size_bits = 3 },
	{ OPA_PATH_REC_FIELD(flow_label),
	  .offset_words = 12,
	  .offset_bits = 4,
	  .size_bits = 20 },
	{ OPA_PATH_REC_FIELD(hop_limit),
	  .offset_words = 12,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ OPA_PATH_REC_FIELD(traffic_class),
	  .offset_words = 13,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ OPA_PATH_REC_FIELD(reversible),
	  .offset_words = 13,
	  .offset_bits = 8,
	  .size_bits = 1 },
	{ OPA_PATH_REC_FIELD(numb_path),
	  .offset_words = 13,
	  .offset_bits = 9,
	  .size_bits = 7 },
	{ OPA_PATH_REC_FIELD(pkey),
	  .offset_words = 13,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ OPA_PATH_REC_FIELD(opa.l2_8B),
	  .offset_words = 14,
	  .offset_bits = 0,
	  .size_bits = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_10B),
	  .offset_words = 14,
	  .offset_bits = 1,
	  .size_bits = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_9B),
	  .offset_words = 14,
	  .offset_bits = 2,
	  .size_bits = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_16B),
	  .offset_words = 14,
	  .offset_bits = 3,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits = 4,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_type),
	  .offset_words = 14,
	  .offset_bits = 6,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_priority),
	  .offset_words = 14,
	  .offset_bits = 8,
	  .size_bits = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits = 16,
	  .size_bits = 3 },
	{ OPA_PATH_REC_FIELD(sl),
	  .offset_words = 14,
	  .offset_bits = 19,
	  .size_bits = 5 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ OPA_PATH_REC_FIELD(mtu_selector),
	  .offset_words = 15,
	  .offset_bits = 0,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(mtu),
	  .offset_words = 15,
	  .offset_bits = 2,
	  .size_bits = 6 },
	{ OPA_PATH_REC_FIELD(rate_selector),
	  .offset_words = 15,
	  .offset_bits = 8,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(rate),
	  .offset_words = 15,
	  .offset_bits = 10,
	  .size_bits = 6 },
	{ OPA_PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 15,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(packet_life_time),
	  .offset_words = 15,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ OPA_PATH_REC_FIELD(preference),
	  .offset_words = 15,
	  .offset_bits = 24,
	  .size_bits = 8 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_sa_mcmember_rec, field),	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits = 24,
	  .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits = 26,
	  .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits = 0,
	  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits = 4,
	  .size_bits = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits = 4,
	  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits = 8,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits = 9,
	  .size_bits = 23 },
};

#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_class_port_info, field),	\
	.field_name          = "ib_class_port_info:" #field

static const struct ib_field ib_classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits = 8,
	  .size_bits = 8 },
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	  .offset_words = 0,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 7,
	  .offset_bits = 16,
	  .size_bits = 16 },

	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	  .offset_words = 8,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits = 0,
	  .size_bits = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	  .offset_words = 14,
	  .offset_bits = 0,
	  .size_bits = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 15,
	  .offset_bits = 16,
	  .size_bits = 16 },

	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	  .offset_words = 16,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits = 0,
	  .size_bits = 32 },
};

#define OPA_CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct opa_class_port_info, field), \
	.struct_size_bytes = \
		sizeof_field(struct opa_class_port_info, field), \
	.field_name          = "opa_class_port_info:" #field

static const struct ib_field opa_classport_info_rec_table[] = {
	{ OPA_CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits = 8,
	  .size_bits = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
	  .offset_words = 0,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
	  .offset_words = 8,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
	  .offset_words = 14,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
	  .offset_words = 16,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 18,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 18,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
	  .offset_words = 19,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ RESERVED,
	  .offset_words = 19,
	  .offset_bits = 8,
	  .size_bits = 24 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_sa_guidinfo_rec, field),	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits = 16,
	  .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 512 },
};

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	strscpy_pad(header->device_name,
		    dev_name(&query->port->agent->device->dev),
		    LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_ALL;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	struct ib_sa_mad *mad;
	int len;
	unsigned long flags;
	unsigned long delay;
	gfp_t gfp_flag;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
		GFP_NOWAIT;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);

	if (ret)
		goto out;

	/* Put the request on the list. */
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);

out:
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_path_rec_data *srec, *drec;
	struct ib_sa_path_query *path_query;
	struct ib_mad_send_wc mad_send_wc;
	const struct nlattr *head, *curr;
	struct ib_sa_mad *mad = NULL;
	int len, rem, num_prs = 0;
	u32 mask = 0;
	int status = -EIO;

	if (!query->callback)
		goto out;

	path_query = container_of(query, struct ib_sa_path_query, sa_query);
	mad = query->mad_buf->mad;
	if (!path_query->conv_pr &&
	    (be16_to_cpu(mad->mad_hdr.attr_id) == IB_SA_ATTR_PATH_REC)) {
		/* Need a larger buffer for possible multiple PRs */
		query->resp_pr_data = kvcalloc(RDMA_PRIMARY_PATH_MAX_REC_NUM,
					       sizeof(*drec), GFP_KERNEL);
		if (!query->resp_pr_data) {
			query->callback(query, -ENOMEM, 0, NULL);
			return;
		}
	}

	head = (const struct nlattr *) nlmsg_data(nlh);
	len = nlmsg_len(nlh);
	switch (query->path_use) {
	case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
		mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
		break;

	case LS_RESOLVE_PATH_USE_ALL:
		mask = IB_PATH_PRIMARY;
		break;

	case LS_RESOLVE_PATH_USE_GMP:
	default:
		mask = IB_PATH_PRIMARY | IB_PATH_GMP |
			IB_PATH_BIDIRECTIONAL;
		break;
	}

	drec = (struct ib_path_rec_data *)query->resp_pr_data;
	nla_for_each_attr(curr, head, len, rem) {
		if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
			continue;

		srec = nla_data(curr);
		if ((srec->flags & mask) != mask)
			continue;

		status = 0;
		if (!drec) {
			memcpy(mad->data, srec->path_rec,
			       sizeof(srec->path_rec));
			num_prs = 1;
			break;
		}

		memcpy(drec, srec, sizeof(*drec));
		drec++;
		num_prs++;
		if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
			break;
	}

	if (!status)
		mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;

	query->callback(query, status, num_prs, mad);
	kvfree(query->resp_pr_data);
	query->resp_pr_data = NULL;

out:
	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_policy, NULL);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return 0;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_policy, NULL);
	if (ret)
		return 0;

	return 1;
}

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	unsigned long flags;
	struct ib_sa_query *query = NULL, *iter;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(iter, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == iter->seq) {
			if (!ib_sa_query_cancelled(iter)) {
				list_del(&iter->list);
				query = iter;
			}
			break;
		}
	}

	if (!query) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return 0;
}

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	rdma_destroy_ah(sm_ah->ah, 0);
	kfree(sm_ah);
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_send_buf *mad_buf;

	xa_lock_irqsave(&queries, flags);
	if (xa_load(&queries, id) != query) {
		xa_unlock_irqrestore(&queries, flags);
		return;
	}
	mad_buf = query->mad_buf;
	xa_unlock_irqrestore(&queries, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

static u8 get_src_path_mask(struct ib_device *device, u32 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num,
				   struct sa_path_rec *rec,
				   struct rdma_ah_attr *ah_attr,
				   const struct ib_gid_attr *gid_attr)
{
	enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);

	if (!gid_attr) {
		gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
						 port_num, NULL);
		if (IS_ERR(gid_attr))
			return PTR_ERR(gid_attr);
	} else
		rdma_hold_gid_attr(gid_attr);

	rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
				be32_to_cpu(rec->flow_label),
				rec->hop_limit, rec->traffic_class,
				gid_attr);
	return 0;
}

/**
 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
 *   an SA path record.
 * @device: Device associated with the ah attributes initialization.
 * @port_num: Port on the specified device.
 * @rec: path record entry to use for ah attributes initialization.
 * @ah_attr: address handle attributes to initialize from the path record.
 * @gid_attr: SGID attribute to consider during initialization.
 *
 * When ib_init_ah_attr_from_path() returns success,
 * (a) for IB link layer it optionally contains a reference to SGID attribute
 * when GRH is present for IB link layer.
 * (b) for RoCE link layer it contains a reference to SGID attribute.
 * User must invoke rdma_destroy_ah_attr() to release reference to SGID
 * attributes which are initialized using ib_init_ah_attr_from_path().
 */
int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
			      struct sa_path_rec *rec,
			      struct rdma_ah_attr *ah_attr,
			      const struct ib_gid_attr *gid_attr)
{
	int ret = 0;

	memset(ah_attr, 0, sizeof(*ah_attr));
	ah_attr->type = rdma_ah_find_type(device, port_num);
	rdma_ah_set_sl(ah_attr, rec->sl);
	rdma_ah_set_port_num(ah_attr, port_num);
	rdma_ah_set_static_rate(ah_attr, rec->rate);

	if (sa_path_is_roce(rec)) {
		ret = roce_resolve_route_from_path(rec, gid_attr);
		if (ret)
			return ret;

		memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
	} else {
		rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
		if (sa_path_is_opa(rec) &&
		    rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
			rdma_ah_set_make_grd(ah_attr, true);

		rdma_ah_set_path_bits(ah_attr,
				      be32_to_cpu(sa_path_get_slid(rec)) &
				      get_src_path_mask(device, port_num));
	}

	if (rec->hop_limit > 0 || sa_path_is_roce(rec))
		ret = init_ah_attr_grh_fields(device, port_num,
					      rec, ah_attr, gid_attr);
	return ret;
}
EXPORT_SYMBOL(ib_init_ah_attr_from_path);
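
/*
 * Illustrative sketch only (not part of this file's logic): a typical
 * caller initializes ah_attr from a resolved path record, creates an AH,
 * and then drops the SGID reference with rdma_destroy_ah_attr() as the
 * kernel-doc above requires.  The names "my_device", "my_port", "my_pd"
 * and "resolved_rec" are hypothetical.
 *
 *	struct rdma_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *	int ret;
 *
 *	ret = ib_init_ah_attr_from_path(my_device, my_port, resolved_rec,
 *					&ah_attr, NULL);
 *	if (ret)
 *		return ret;
 *	ah = rdma_create_ah(my_pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *	rdma_destroy_ah_attr(&ah_attr);		(releases the SGID reference)
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */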

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct rdma_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	/*
	 * Always check if sm_ah has valid dlid assigned,
	 * before querying for class port info
	 */
	if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
	    !rdma_is_valid_unicast_lid(&ah_attr)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -EAGAIN;
	}
	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    ((query->flags & IB_SA_QUERY_OPA) ?
					     OPA_MGMT_BASE_VERSION :
					     IB_MGMT_BASE_VERSION));
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
	struct ib_sa_mad *mad = query->mad_buf->mad;
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	if (query->flags & IB_SA_QUERY_OPA) {
		mad->mad_hdr.base_version  = OPA_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
	} else {
		mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
	}
	mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
		    gfp_t gfp_mask)
{
	unsigned long flags;
	int ret, id;
	const int nmbr_sa_query_retries = 10;

	xa_lock_irqsave(&queries, flags);
	ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
	xa_unlock_irqrestore(&queries, flags);
	if (ret < 0)
		return ret;

	query->mad_buf->timeout_ms = timeout_ms / nmbr_sa_query_retries;
	query->mad_buf->retries    = nmbr_sa_query_retries;
	if (!query->mad_buf->timeout_ms) {
		/* Special case, very small timeout_ms */
		query->mad_buf->timeout_ms = 1;
		query->mad_buf->retries = timeout_ms;
	}
	query->mad_buf->context[0] = query;
	query->id = id;

	if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
	    (!(query->flags & IB_SA_QUERY_OPA))) {
		if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		xa_lock_irqsave(&queries, flags);
		__xa_erase(&queries, id);
		xa_unlock_irqrestore(&queries, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
					 struct ib_sa_device *sa_dev,
					 u32 port_num)
{
	struct ib_sa_port *port;
	unsigned long flags;
	bool ret = false;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->classport_lock, flags);
	if (!port->classport_info.valid)
		goto ret;

	if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
		ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
			OPA_CLASS_PORT_INFO_PR_SUPPORT;
ret:
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}

enum opa_pr_supported {
	PR_NOT_SUPPORTED,
	PR_OPA_SUPPORTED,
	PR_IB_SUPPORTED
};

/*
 * opa_pr_query_possible - Check if current PR query can be an OPA query.
 *
 * Returns PR_NOT_SUPPORTED if a path record query is not
 * possible, PR_OPA_SUPPORTED if an OPA path record query
 * is possible and PR_IB_SUPPORTED if an IB path record
 * query is possible.
 */
static int opa_pr_query_possible(struct ib_sa_client *client,
				 struct ib_sa_device *sa_dev,
				 struct ib_device *device, u32 port_num)
{
	struct ib_port_attr port_attr;

	if (ib_query_port(device, port_num, &port_attr))
		return PR_NOT_SUPPORTED;

	if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num))
		return PR_OPA_SUPPORTED;

	if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		return PR_NOT_SUPPORTED;
	else
		return PR_IB_SUPPORTED;
}

static void ib_sa_pr_callback_single(struct ib_sa_path_query *query,
				     int status, struct ib_sa_mad *mad)
{
	struct sa_path_rec rec = {};

	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
		  mad->data, &rec);
	rec.rec_type = SA_PATH_REC_TYPE_IB;
	sa_path_set_dmac_zero(&rec);

	if (query->conv_pr) {
		struct sa_path_rec opa;

		memset(&opa, 0, sizeof(struct sa_path_rec));
		sa_convert_path_ib_to_opa(&opa, &rec);
		query->callback(status, &opa, 1, query->context);
	} else {
		query->callback(status, &rec, 1, query->context);
	}
}

/**
 * ib_sa_pr_callback_multiple() - Parse path records then do callback.
 *
 * In a multiple-PR case the PRs are saved in "query->resp_pr_data"
 * (instead of "mad->data") and with "ib_path_rec_data" structure format,
 * so that rec->flags can be set to indicate the type of PR.
 * This is valid only in IB fabric.
 */
static void ib_sa_pr_callback_multiple(struct ib_sa_path_query *query,
				       int status, int num_prs,
				       struct ib_path_rec_data *rec_data)
{
	struct sa_path_rec *rec;
	int i;

	rec = kvcalloc(num_prs, sizeof(*rec), GFP_KERNEL);
	if (!rec) {
		query->callback(-ENOMEM, NULL, 0, query->context);
		return;
	}

	for (i = 0; i < num_prs; i++) {
		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  rec_data[i].path_rec, rec + i);
		rec[i].rec_type = SA_PATH_REC_TYPE_IB;
		sa_path_set_dmac_zero(rec + i);
		rec[i].flags = rec_data[i].flags;
	}

	query->callback(status, rec, num_prs, query->context);
	kvfree(rec);
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status, int num_prs,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);
	struct sa_path_rec rec;

	if (!mad || !num_prs) {
		query->callback(status, NULL, 0, query->context);
		return;
	}

	if (sa_query->flags & IB_SA_QUERY_OPA) {
		if (num_prs != 1) {
			query->callback(-EINVAL, NULL, 0, query->context);
			return;
		}

		ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			  mad->data, &rec);
		rec.rec_type = SA_PATH_REC_TYPE_OPA;
		query->callback(status, &rec, num_prs, query->context);
	} else {
		if (!sa_query->resp_pr_data)
			ib_sa_pr_callback_single(query, status, mad);
		else
			ib_sa_pr_callback_multiple(query, status, num_prs,
						   sa_query->resp_pr_data);
	}
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	kfree(query->conv_pr);
	kfree(query);
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u32 port_num,
		       struct sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       unsigned long timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct sa_path_rec *resp,
					int num_paths, void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	enum opa_pr_supported status;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
	    (rec->rec_type != SA_PATH_REC_TYPE_OPA))
		return -EINVAL;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
		status = opa_pr_query_possible(client, sa_dev, device, port_num);
		if (status == PR_NOT_SUPPORTED) {
			ret = -EINVAL;
			goto err1;
		} else if (status == PR_OPA_SUPPORTED) {
			query->sa_query.flags |= IB_SA_QUERY_OPA;
		} else {
			query->conv_pr =
				kmalloc(sizeof(*query->conv_pr), gfp_mask);
			if (!query->conv_pr) {
				ret = -ENOMEM;
				goto err1;
			}
		}
	}

	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err2;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	if (query->sa_query.flags & IB_SA_QUERY_OPA) {
		ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			rec, mad->data);
	} else if (query->conv_pr) {
		sa_convert_path_opa_to_ib(query->conv_pr, rec);
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			query->conv_pr, mad->data);
	} else {
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			rec, mad->data);
	}

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
						query->conv_pr : rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err3;

	return ret;

err3:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err2:
	kfree(query->conv_pr);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
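
/*
 * Illustrative sketch only (not part of this file's logic): a typical
 * asynchronous caller of ib_sa_path_rec_get().  The client, callback and
 * context names ("my_sa_client", "my_path_cb", "my_ctx") are hypothetical.
 *
 *	static void my_path_cb(int status, struct sa_path_rec *resp,
 *			       int num_paths, void *context)
 *	{
 *		if (!status)
 *			... use resp[0..num_paths - 1] before returning ...
 *	}
 *
 *	ib_sa_register_client(&my_sa_client);
 *	id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				3000, GFP_KERNEL, my_path_cb, my_ctx, &query);
 *	if (id < 0)
 *		... error, the callback will not run ...
 *	else if (need_to_abort)
 *		ib_sa_cancel_query(id, query);	(callback then gets -EINTR)
 */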

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status, int num_prs,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u32 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     unsigned long timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status, int num_paths,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u32 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      unsigned long timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

struct ib_classport_info_context {
	struct completion	done;
	struct ib_sa_query	*sa_query;
};

static void ib_classportinfo_cb(void *context)
{
	struct ib_classport_info_context *cb_ctx = context;

	complete(&cb_ctx->done);
}

static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status, int num_prs,
					      struct ib_sa_mad *mad)
{
	unsigned long flags;
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;

	if (mad) {
		if (sa_query->flags & IB_SA_QUERY_OPA) {
			struct opa_class_port_info rec;

			ib_unpack(opa_classport_info_rec_table,
				  ARRAY_SIZE(opa_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.opa, &rec,
				       sizeof(info->data.opa));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);

		} else {
			struct ib_class_port_info rec;

			ib_unpack(ib_classport_info_rec_table,
				  ARRAY_SIZE(ib_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.ib, &rec,
				       sizeof(info->data.ib));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_IB;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);
		}
	}
	query->callback(query->context);
}

static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}

static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
					  unsigned long timeout_ms,
					  void (*callback)(void *context),
					  void *context,
					  struct ib_sa_query **sa_query)
{
	struct ib_mad_agent *agent;
	struct ib_sa_classport_info_query *query;
	struct ib_sa_mad *mad;
	gfp_t gfp_mask = GFP_KERNEL;
	int ret;

	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
						 port->port_num) ?
				 IB_SA_QUERY_OPA : 0;
	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
						 port->port_num) ?
				 IB_SA_QUERY_OPA : 0;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err_free;

	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = ib_sa_classport_info_rec_callback;
	query->sa_query.release = ib_sa_classport_info_rec_release;
	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err_free_mad;

	return ret;

err_free_mad:
	*sa_query = NULL;
	free_mad(&query->sa_query);

err_free:
	kfree(query);
	return ret;
}

static void update_ib_cpi(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, ib_cpi_work.work);
	struct ib_classport_info_context *cb_context;
	unsigned long flags;
	int ret;

	/* If the classport info is valid, nothing
	 * to do here.
	 */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid) {
		spin_unlock_irqrestore(&port->classport_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
	if (!cb_context)
		goto err_nomem;

	init_completion(&cb_context->done);

	ret = ib_sa_classport_info_rec_query(port, 3000,
					     ib_classportinfo_cb, cb_context,
					     &cb_context->sa_query);
	if (ret < 0)
		goto free_cb_err;
	wait_for_completion(&cb_context->done);
free_cb_err:
	kfree(cb_context);
	spin_lock_irqsave(&port->classport_lock, flags);

	/* If the classport info is still not valid, the query should have
	 * failed for some reason. Retry issuing the query
	 */
	if (!port->classport_info.valid) {
		port->classport_info.retry_cnt++;
		if (port->classport_info.retry_cnt <=
		    IB_SA_CPI_MAX_RETRY_CNT) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
		}
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

err_nomem:
	return;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, 0, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, 0, NULL);
			break;
		default:
			query->callback(query, -EIO, 0, NULL);
			break;
		}

	xa_lock_irqsave(&queries, flags);
	__xa_erase(&queries, query->id);
	xa_unlock_irqrestore(&queries, flags);

	free_mad(query);
	if (query->client)
		ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0, 1,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, 0, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct rdma_ah_attr ah_attr;
	bool grh_required;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.type = rdma_ah_find_type(port->agent->device,
					 port->port_num);
	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
	rdma_ah_set_port_num(&ah_attr, port->port_num);

	grh_required = rdma_is_grh_required(port->agent->device,
					    port->port_num);

	/*
	 * The OPA sm_lid of 0xFFFF needs special handling so that it can be
	 * differentiated from a permissive LID of 0xFFFF. We set the
	 * grh_required flag here so the SA can program the DGID in the
	 * address handle appropriately
	 */
	if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
	    (grh_required ||
	     port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
		rdma_ah_set_make_grd(&ah_attr, true);

	if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
		rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
		rdma_ah_set_subnet_prefix(&ah_attr,
					  cpu_to_be64(port_attr.subnet_prefix));
		rdma_ah_set_interface_id(&ah_attr,
					 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
	}

	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr,
				    RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		u32 port_num = event->element.port_num - sa_dev->start_port;
		struct ib_sa_port *port = &sa_dev->port[port_num];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE ||
		    event->event == IB_EVENT_PORT_ACTIVE) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			port->classport_info.retry_cnt = 0;
			spin_unlock_irqrestore(&port->classport_lock, flags);
			queue_delayed_work(ib_wq,
					   &port->ib_cpi_work, delay);
		}
		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
	}
}

static int ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;
	int ret;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
	if (!sa_dev)
		return -ENOMEM;

	sa_dev->start_port = s;
	sa_dev->end_port = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent)) {
			ret = PTR_ERR(sa_dev->port[i].agent);
			goto err;
		}

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
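		/*
		 * Delayed work that (re)issues the ClassPortInfo query until
		 * the cached copy for this port becomes valid; see
		 * update_ib_cpi().
		 */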
		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
				  update_ib_cpi);

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	ib_register_event_handler(&sa_dev->event_handler);

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return 0;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return ret;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	ib_unregister_event_handler(&sa_dev->event_handler);
	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	WARN_ON(!xa_empty(&queries));
}