/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include <rdma/opa_addr.h>
#include <rdma/rdma_cm.h>
#include "sa.h"
#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
#define IB_SA_CPI_MAX_RETRY_CNT			3
#define IB_SA_CPI_RETRY_WAIT			1000 /* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah *ah;
	struct kref ref;
	u16 pkey_index;
	u8 src_path_mask;
};

enum rdma_class_port_info_type {
	RDMA_CLASS_PORT_INFO_IB,
	RDMA_CLASS_PORT_INFO_OPA
};

struct rdma_class_port_info {
	enum rdma_class_port_info_type type;
	union {
		struct ib_class_port_info ib;
		struct opa_class_port_info opa;
	};
};

struct ib_sa_classport_cache {
	bool valid;
	int retry_cnt;
	struct rdma_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah *sm_ah;
	struct work_struct update_task;
	struct ib_sa_classport_cache classport_info;
	struct delayed_work ib_cpi_work;
	spinlock_t classport_lock; /* protects class port info set */
	spinlock_t ah_lock;
	u32 port_num;
};

struct ib_sa_device {
	int start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *sa_query, int status,
			 struct ib_sa_mad *mad);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client *client;
	struct ib_sa_port *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah *sm_ah;
	int id;
	u32 flags;
	struct list_head list; /* Local svc request list */
	u32 seq; /* Local svc request sequence number */
	unsigned long timeout; /* Local svc timeout */
	u8 path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
#define IB_SA_QUERY_OPA			0x00000004

struct ib_sa_path_query {
	void (*callback)(int status, struct sa_path_rec *rec,
			 unsigned int num_paths, void *context);
	void *context;
	struct ib_sa_query sa_query;
	struct sa_path_rec *conv_pr;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};


static int ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct sa_path_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct sa_path_rec, field),	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(ib.dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(ib.slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(ib.raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};

#define OPA_PATH_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct sa_path_rec, field), \
	.struct_size_bytes   = \
		sizeof_field(struct sa_path_rec, field),	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field opa_path_rec_table[] = {
	{ OPA_PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ OPA_PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_PATH_REC_FIELD(opa.dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_PATH_REC_FIELD(opa.slid),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_PATH_REC_FIELD(opa.raw_traffic),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ OPA_PATH_REC_FIELD(flow_label),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ OPA_PATH_REC_FIELD(hop_limit),
	  .offset_words = 12,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(traffic_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(reversible),
	  .offset_words = 13,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(numb_path),
	  .offset_words = 13,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ OPA_PATH_REC_FIELD(pkey),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_PATH_REC_FIELD(opa.l2_8B),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_10B),
	  .offset_words = 14,
	  .offset_bits  = 1,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_9B),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_16B),
	  .offset_words = 14,
	  .offset_bits  = 3,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 4,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_type),
	  .offset_words = 14,
	  .offset_bits  = 6,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_priority),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 3 },
	{ OPA_PATH_REC_FIELD(sl),
	  .offset_words = 14,
	  .offset_bits  = 19,
	  .size_bits    = 5 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(mtu_selector),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(mtu),
	  .offset_words = 15,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(rate_selector),
	  .offset_words = 15,
	  .offset_bits  = 8,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(rate),
	  .offset_words = 15,
	  .offset_bits  = 10,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 15,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(packet_life_time),
	  .offset_words = 15,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(preference),
	  .offset_words = 15,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_sa_mcmember_rec, field),	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_class_port_info, field),	\
	.field_name          = "ib_class_port_info:" #field

static const struct ib_field ib_classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 7,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 15,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
};

#define OPA_CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes =\
		offsetof(struct opa_class_port_info, field),	\
	.struct_size_bytes   = \
		sizeof_field(struct opa_class_port_info, field),	\
	.field_name          = "opa_class_port_info:" #field

static const struct ib_field opa_classport_info_rec_table[] = {
	{ OPA_CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 18,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 18,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
	  .offset_words = 19,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 19,
	  .offset_bits  = 8,
	  .size_bits    = 24 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_sa_guidinfo_rec, field),	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 512 },
};

#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	strscpy_pad(header->device_name,
		    dev_name(&query->port->agent->device->dev),
		    LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_ALL;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	struct ib_sa_mad *mad;
	int len;
	unsigned long flags;
	unsigned long delay;
	gfp_t gfp_flag;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);
	gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
		GFP_NOWAIT;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);

	if (ret)
		goto out;

	/* Put the request on the list.*/
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);

out:
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct sa_path_rec recs[RDMA_PRIMARY_PATH_MAX_REC_NUM];
	struct ib_sa_path_query *path_query;
	struct ib_path_rec_data *rec_data;
	struct ib_mad_send_wc mad_send_wc;
	const struct nlattr *head, *curr;
	struct ib_sa_mad *mad = NULL;
	int len, rem, status = -EIO;
	unsigned int num_prs = 0;
	u32 mask = 0;

	if (!query->callback)
		goto out;

	path_query = container_of(query, struct ib_sa_path_query, sa_query);
	mad = query->mad_buf->mad;

	head = (const struct nlattr *) nlmsg_data(nlh);
	len = nlmsg_len(nlh);
	switch (query->path_use) {
	case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
		mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
		break;

	case LS_RESOLVE_PATH_USE_ALL:
		mask = IB_PATH_PRIMARY;
		break;

	case LS_RESOLVE_PATH_USE_GMP:
	default:
		mask = IB_PATH_PRIMARY | IB_PATH_GMP |
			IB_PATH_BIDIRECTIONAL;
		break;
	}

	nla_for_each_attr(curr, head, len, rem) {
		if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
			continue;

		rec_data = nla_data(curr);
		if ((rec_data->flags & mask) != mask)
			continue;

		if ((query->flags & IB_SA_QUERY_OPA) ||
		    path_query->conv_pr) {
			mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
			memcpy(mad->data, rec_data->path_rec,
			       sizeof(rec_data->path_rec));
			query->callback(query, 0, mad);
			goto out;
		}

		status = 0;
		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  rec_data->path_rec, &recs[num_prs]);
		recs[num_prs].flags = rec_data->flags;
		recs[num_prs].rec_type = SA_PATH_REC_TYPE_IB;
		sa_path_set_dmac_zero(&recs[num_prs]);

		num_prs++;
		if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
			break;
	}

	if (!status) {
		mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
		path_query->callback(status, recs, num_prs,
				     path_query->context);
	} else
		query->callback(query, status, mad);

out:
	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_policy, NULL);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return 0;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_policy, NULL);
	if (ret)
		return 0;

	return 1;
}

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	unsigned long flags;
	struct ib_sa_query *query = NULL, *iter;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(iter, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == iter->seq) {
			if (!ib_sa_query_cancelled(iter)) {
				list_del(&iter->list);
				query = iter;
			}
			break;
		}
	}

	if (!query) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return 0;
}

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	rdma_destroy_ah(sm_ah->ah, 0);
	kfree(sm_ah);
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_send_buf *mad_buf;

	xa_lock_irqsave(&queries, flags);
	if (xa_load(&queries, id) != query) {
		xa_unlock_irqrestore(&queries, flags);
		return;
	}
	mad_buf = query->mad_buf;
	xa_unlock_irqrestore(&queries, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
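
/*
 * Illustrative usage sketch of the client registration / cancel lifecycle
 * above; this is not part of the original driver.  "my_client", "my_cb",
 * "my_ctx" and the timeout and component-mask values are hypothetical
 * caller-side choices.
 *
 *	struct ib_sa_client my_client;
 *	struct ib_sa_query *query;
 *	int id;
 *
 *	ib_sa_register_client(&my_client);
 *	id = ib_sa_path_rec_get(&my_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				3000, GFP_KERNEL, my_cb, my_ctx, &query);
 *	if (id >= 0)
 *		ib_sa_cancel_query(id, query);	// callback completes with -EINTR
 *	ib_sa_unregister_client(&my_client);	// waits for outstanding queries
 */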

static u8 get_src_path_mask(struct ib_device *device, u32 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num,
				   struct sa_path_rec *rec,
				   struct rdma_ah_attr *ah_attr,
				   const struct ib_gid_attr *gid_attr)
{
	enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);

	if (!gid_attr) {
		gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
						 port_num, NULL);
		if (IS_ERR(gid_attr))
			return PTR_ERR(gid_attr);
	} else
		rdma_hold_gid_attr(gid_attr);

	rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
				be32_to_cpu(rec->flow_label),
				rec->hop_limit, rec->traffic_class,
				gid_attr);
	return 0;
}

/**
 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
 *   an SA path record.
 * @device: Device associated with the ah attributes initialization.
 * @port_num: Port on the specified device.
 * @rec: path record entry to use for ah attributes initialization.
 * @ah_attr: address handle attributes to initialize from the path record.
 * @gid_attr: SGID attribute to consider during initialization.
 *
 * When ib_init_ah_attr_from_path() returns success,
 * (a) for the IB link layer it optionally contains a reference to the SGID
 * attribute when a GRH is present.
 * (b) for the RoCE link layer it contains a reference to the SGID attribute.
 * User must invoke rdma_destroy_ah_attr() to release the reference to SGID
 * attributes which are initialized using ib_init_ah_attr_from_path().
 */
int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
			      struct sa_path_rec *rec,
			      struct rdma_ah_attr *ah_attr,
			      const struct ib_gid_attr *gid_attr)
{
	int ret = 0;

	memset(ah_attr, 0, sizeof(*ah_attr));
	ah_attr->type = rdma_ah_find_type(device, port_num);
	rdma_ah_set_sl(ah_attr, rec->sl);
	rdma_ah_set_port_num(ah_attr, port_num);
	rdma_ah_set_static_rate(ah_attr, rec->rate);

	if (sa_path_is_roce(rec)) {
		ret = roce_resolve_route_from_path(rec, gid_attr);
		if (ret)
			return ret;

		memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
	} else {
		rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
		if (sa_path_is_opa(rec) &&
		    rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
			rdma_ah_set_make_grd(ah_attr, true);

		rdma_ah_set_path_bits(ah_attr,
				      be32_to_cpu(sa_path_get_slid(rec)) &
				      get_src_path_mask(device, port_num));
	}

	if (rec->hop_limit > 0 || sa_path_is_roce(rec))
		ret = init_ah_attr_grh_fields(device, port_num,
					      rec, ah_attr, gid_attr);
	return ret;
}
EXPORT_SYMBOL(ib_init_ah_attr_from_path);
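
/*
 * Illustrative sketch of consuming a resolved path record with the API
 * above; this is not part of the original driver.  "pd", "device",
 * "port_num" and "rec" are assumed to be supplied by the caller.
 *
 *	struct rdma_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *
 *	if (!ib_init_ah_attr_from_path(device, port_num, &rec, &ah_attr, NULL)) {
 *		ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *		...
 *		rdma_destroy_ah_attr(&ah_attr);	// drops the SGID reference
 *	}
 */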

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct rdma_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	/*
	 * Always check if sm_ah has valid dlid assigned,
	 * before querying for class port info
	 */
	if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
	    !rdma_is_valid_unicast_lid(&ah_attr)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -EAGAIN;
	}
	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    ((query->flags & IB_SA_QUERY_OPA) ?
					     OPA_MGMT_BASE_VERSION :
					     IB_MGMT_BASE_VERSION));
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
	struct ib_sa_mad *mad = query->mad_buf->mad;
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	if (query->flags & IB_SA_QUERY_OPA) {
		mad->mad_hdr.base_version  = OPA_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
	} else {
		mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
	}
	mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
		    gfp_t gfp_mask)
{
	unsigned long flags;
	int ret, id;
	const int nmbr_sa_query_retries = 10;

	xa_lock_irqsave(&queries, flags);
	ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
	xa_unlock_irqrestore(&queries, flags);
	if (ret < 0)
		return ret;

	query->mad_buf->timeout_ms  = timeout_ms / nmbr_sa_query_retries;
	query->mad_buf->retries = nmbr_sa_query_retries;
	if (!query->mad_buf->timeout_ms) {
		/* Special case, very small timeout_ms */
		query->mad_buf->timeout_ms = 1;
		query->mad_buf->retries = timeout_ms;
	}
	query->mad_buf->context[0] = query;
	query->id = id;

	if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
	    (!(query->flags & IB_SA_QUERY_OPA))) {
		if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		xa_lock_irqsave(&queries, flags);
		__xa_erase(&queries, id);
		xa_unlock_irqrestore(&queries, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}
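
/*
 * Illustrative note (not from the original driver): send_mad() spreads the
 * caller's timeout over nmbr_sa_query_retries sends.  For example, a
 * timeout_ms of 5000 becomes a 500 ms per-send timeout with 10 retries,
 * while a very small value such as timeout_ms = 3 hits the special case
 * above and becomes a 1 ms per-send timeout with 3 retries.
 */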

void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
					 struct ib_sa_device *sa_dev,
					 u32 port_num)
{
	struct ib_sa_port *port;
	unsigned long flags;
	bool ret = false;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->classport_lock, flags);
	if (!port->classport_info.valid)
		goto ret;

	if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
		ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
			OPA_CLASS_PORT_INFO_PR_SUPPORT;
ret:
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}

enum opa_pr_supported {
	PR_NOT_SUPPORTED,
	PR_OPA_SUPPORTED,
	PR_IB_SUPPORTED
};

/*
 * opa_pr_query_possible - Check if current PR query can be an OPA query.
 *
 * Returns PR_NOT_SUPPORTED if a path record query is not
 * possible, PR_OPA_SUPPORTED if an OPA path record query
 * is possible and PR_IB_SUPPORTED if an IB path record
 * query is possible.
 */
static int opa_pr_query_possible(struct ib_sa_client *client,
				 struct ib_sa_device *sa_dev,
				 struct ib_device *device, u32 port_num)
{
	struct ib_port_attr port_attr;

	if (ib_query_port(device, port_num, &port_attr))
		return PR_NOT_SUPPORTED;

	if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num))
		return PR_OPA_SUPPORTED;

	if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		return PR_NOT_SUPPORTED;
	else
		return PR_IB_SUPPORTED;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status, struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);
	struct sa_path_rec rec = {};

	if (!mad) {
		query->callback(status, NULL, 0, query->context);
		return;
	}

	if (sa_query->flags & IB_SA_QUERY_OPA) {
		ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			  mad->data, &rec);
		rec.rec_type = SA_PATH_REC_TYPE_OPA;
		query->callback(status, &rec, 1, query->context);
		return;
	}

	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
		  mad->data, &rec);
	rec.rec_type = SA_PATH_REC_TYPE_IB;
	sa_path_set_dmac_zero(&rec);

	if (query->conv_pr) {
		struct sa_path_rec opa;

		memset(&opa, 0, sizeof(struct sa_path_rec));
		sa_convert_path_ib_to_opa(&opa, &rec);
		query->callback(status, &opa, 1, query->context);
	} else {
		query->callback(status, &rec, 1, query->context);
	}
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	kfree(query->conv_pr);
	kfree(query);
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u32 port_num,
		       struct sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       unsigned long timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct sa_path_rec *resp,
					unsigned int num_paths, void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	enum opa_pr_supported status;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
	    (rec->rec_type != SA_PATH_REC_TYPE_OPA))
		return -EINVAL;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
		status = opa_pr_query_possible(client, sa_dev, device, port_num);
		if (status == PR_NOT_SUPPORTED) {
			ret = -EINVAL;
			goto err1;
		} else if (status == PR_OPA_SUPPORTED) {
			query->sa_query.flags |= IB_SA_QUERY_OPA;
		} else {
			query->conv_pr =
				kmalloc(sizeof(*query->conv_pr), gfp_mask);
			if (!query->conv_pr) {
				ret = -ENOMEM;
				goto err1;
			}
		}
	}

	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err2;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);
	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	if (query->sa_query.flags & IB_SA_QUERY_OPA) {
		ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			rec, mad->data);
	} else if (query->conv_pr) {
		sa_convert_path_opa_to_ib(query->conv_pr, rec);
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			query->conv_pr, mad->data);
	} else {
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			rec, mad->data);
	}

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
						query->conv_pr : rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err3;

	return ret;

err3:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err2:
	kfree(query->conv_pr);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
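
/*
 * Illustrative sketch of a completion callback for ib_sa_path_rec_get();
 * this is not part of the original driver.  "my_path_cb", "my_ctx", and the
 * chosen component mask and timeout are hypothetical.
 *
 *	static void my_path_cb(int status, struct sa_path_rec *resp,
 *			       unsigned int num_paths, void *context)
 *	{
 *		if (!status)
 *			...use resp[0] through resp[num_paths - 1]...
 *		else if (status == -ETIMEDOUT)
 *			...no response from the SA within timeout_ms...
 *	}
 *
 *	ret = ib_sa_path_rec_get(&my_client, device, port_num, &rec,
 *				 IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				 2000, GFP_KERNEL, my_path_cb, my_ctx, &query);
 */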

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status, struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u32 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     unsigned long timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status, struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u32 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      unsigned long timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);
	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

struct ib_classport_info_context {
	struct completion	done;
	struct ib_sa_query	*sa_query;
};

static void ib_classportinfo_cb(void *context)
{
	struct ib_classport_info_context *cb_ctx = context;

	complete(&cb_ctx->done);
}

static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status, struct ib_sa_mad *mad)
{
	unsigned long flags;
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;

	if (mad) {
		if (sa_query->flags & IB_SA_QUERY_OPA) {
			struct opa_class_port_info rec;

			ib_unpack(opa_classport_info_rec_table,
				  ARRAY_SIZE(opa_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.opa, &rec,
				       sizeof(info->data.opa));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);

		} else {
			struct ib_class_port_info rec;

			ib_unpack(ib_classport_info_rec_table,
				  ARRAY_SIZE(ib_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.ib, &rec,
				       sizeof(info->data.ib));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_IB;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);
		}
	}
	query->callback(query->context);
}

static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}

static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
					  unsigned long timeout_ms,
					  void (*callback)(void *context),
					  void *context,
					  struct ib_sa_query **sa_query)
{
	struct ib_mad_agent *agent;
	struct ib_sa_classport_info_query *query;
	struct ib_sa_mad *mad;
	gfp_t gfp_mask = GFP_KERNEL;
	int ret;

	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
						 port->port_num) ?
				 IB_SA_QUERY_OPA : 0;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err_free;

	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = ib_sa_classport_info_rec_callback;
	query->sa_query.release  = ib_sa_classport_info_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask	 = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err_free_mad;

	return ret;

err_free_mad:
	*sa_query = NULL;
	free_mad(&query->sa_query);

err_free:
	kfree(query);
	return ret;
}

static void update_ib_cpi(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, ib_cpi_work.work);
	struct ib_classport_info_context *cb_context;
	unsigned long flags;
	int ret;

	/* If the classport info is valid, nothing
	 * to do here.
	 */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid) {
		spin_unlock_irqrestore(&port->classport_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
	if (!cb_context)
		goto err_nomem;

	init_completion(&cb_context->done);

	ret = ib_sa_classport_info_rec_query(port, 3000,
					     ib_classportinfo_cb, cb_context,
					     &cb_context->sa_query);
	if (ret < 0)
		goto free_cb_err;
	wait_for_completion(&cb_context->done);
free_cb_err:
	kfree(cb_context);
	spin_lock_irqsave(&port->classport_lock, flags);
static void update_ib_cpi(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, ib_cpi_work.work);
	struct ib_classport_info_context *cb_context;
	unsigned long flags;
	int ret;

	/* If the classport info is valid, nothing
	 * to do here.
	 */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid) {
		spin_unlock_irqrestore(&port->classport_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
	if (!cb_context)
		goto err_nomem;

	init_completion(&cb_context->done);

	ret = ib_sa_classport_info_rec_query(port, 3000,
					     ib_classportinfo_cb, cb_context,
					     &cb_context->sa_query);
	if (ret < 0)
		goto free_cb_err;
	wait_for_completion(&cb_context->done);
free_cb_err:
	kfree(cb_context);
	spin_lock_irqsave(&port->classport_lock, flags);

	/* If the classport info is still not valid, the query should have
	 * failed for some reason. Retry issuing the query
	 */
	if (!port->classport_info.valid) {
		port->classport_info.retry_cnt++;
		if (port->classport_info.retry_cnt <=
		    IB_SA_CPI_MAX_RETRY_CNT) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
		}
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

err_nomem:
	return;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	xa_lock_irqsave(&queries, flags);
	__xa_erase(&queries, query->id);
	xa_unlock_irqrestore(&queries, flags);

	free_mad(query);
	if (query->client)
		ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

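/*
 * Work item that rebuilds the cached address handle to the SM for a
 * port: query the current port attributes and SM LID/SL, create a new
 * kref-counted ib_sa_sm_ah, and swap it in under ah_lock.
 */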
static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct rdma_ah_attr ah_attr;
	bool grh_required;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.type = rdma_ah_find_type(port->agent->device,
					 port->port_num);
	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
	rdma_ah_set_port_num(&ah_attr, port->port_num);

	grh_required = rdma_is_grh_required(port->agent->device,
					    port->port_num);

	/*
	 * The OPA sm_lid of 0xFFFF needs special handling so that it can be
	 * differentiated from a permissive LID of 0xFFFF. We set the
	 * grh_required flag here so the SA can program the DGID in the
	 * address handle appropriately
	 */
	if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
	    (grh_required ||
	     port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
		rdma_ah_set_make_grd(&ah_attr, true);

	if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
		rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
		rdma_ah_set_subnet_prefix(&ah_attr,
					  cpu_to_be64(port_attr.subnet_prefix));
		rdma_ah_set_interface_id(&ah_attr,
					 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
	}

	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr,
				    RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		u32 port_num = event->element.port_num - sa_dev->start_port;
		struct ib_sa_port *port = &sa_dev->port[port_num];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE ||
		    event->event == IB_EVENT_PORT_ACTIVE) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			port->classport_info.retry_cnt = 0;
			spin_unlock_irqrestore(&port->classport_lock, flags);
			queue_delayed_work(ib_wq,
					   &port->ib_cpi_work, delay);
		}
		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
	}
}

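/*
 * Client add callback: allocate per-port SA state, register a GSI MAD
 * agent on every SA-capable port, then register the event handler and
 * prime the SM address handle cache.
 */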
static int ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;
	int ret;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(struct_size(sa_dev, port,
				     size_add(size_sub(e, s), 1)),
			 GFP_KERNEL);
	if (!sa_dev)
		return -ENOMEM;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent)) {
			ret = PTR_ERR(sa_dev->port[i].agent);
			goto err;
		}

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
				  update_ib_cpi);

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	ib_register_event_handler(&sa_dev->event_handler);

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return 0;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return ret;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	ib_unregister_event_handler(&sa_dev->event_handler);
	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	WARN_ON(!xa_empty(&queries));
}