1 /* 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 4 * Copyright (c) 2006 Intel Corporation. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include <linux/module.h> 36 #include <linux/init.h> 37 #include <linux/err.h> 38 #include <linux/random.h> 39 #include <linux/spinlock.h> 40 #include <linux/slab.h> 41 #include <linux/dma-mapping.h> 42 #include <linux/kref.h> 43 #include <linux/xarray.h> 44 #include <linux/workqueue.h> 45 #include <uapi/linux/if_ether.h> 46 #include <rdma/ib_pack.h> 47 #include <rdma/ib_cache.h> 48 #include <rdma/rdma_netlink.h> 49 #include <net/netlink.h> 50 #include <uapi/rdma/ib_user_sa.h> 51 #include <rdma/ib_marshall.h> 52 #include <rdma/ib_addr.h> 53 #include <rdma/opa_addr.h> 54 #include "sa.h" 55 #include "core_priv.h" 56 57 #define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100 58 #define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000 59 #define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000 60 #define IB_SA_CPI_MAX_RETRY_CNT 3 61 #define IB_SA_CPI_RETRY_WAIT 1000 /*msecs */ 62 static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT; 63 64 struct ib_sa_sm_ah { 65 struct ib_ah *ah; 66 struct kref ref; 67 u16 pkey_index; 68 u8 src_path_mask; 69 }; 70 71 enum rdma_class_port_info_type { 72 RDMA_CLASS_PORT_INFO_IB, 73 RDMA_CLASS_PORT_INFO_OPA 74 }; 75 76 struct rdma_class_port_info { 77 enum rdma_class_port_info_type type; 78 union { 79 struct ib_class_port_info ib; 80 struct opa_class_port_info opa; 81 }; 82 }; 83 84 struct ib_sa_classport_cache { 85 bool valid; 86 int retry_cnt; 87 struct rdma_class_port_info data; 88 }; 89 90 struct ib_sa_port { 91 struct ib_mad_agent *agent; 92 struct ib_sa_sm_ah *sm_ah; 93 struct work_struct update_task; 94 struct ib_sa_classport_cache classport_info; 95 struct delayed_work ib_cpi_work; 96 spinlock_t classport_lock; /* protects class port info set */ 97 spinlock_t ah_lock; 98 u8 port_num; 99 }; 100 101 struct ib_sa_device { 102 int start_port, end_port; 103 struct ib_event_handler event_handler; 104 struct ib_sa_port port[]; 105 }; 106 107 struct ib_sa_query { 108 void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *); 109 void (*release)(struct 
ib_sa_query *); 110 struct ib_sa_client *client; 111 struct ib_sa_port *port; 112 struct ib_mad_send_buf *mad_buf; 113 struct ib_sa_sm_ah *sm_ah; 114 int id; 115 u32 flags; 116 struct list_head list; /* Local svc request list */ 117 u32 seq; /* Local svc request sequence number */ 118 unsigned long timeout; /* Local svc timeout */ 119 u8 path_use; /* How will the pathrecord be used */ 120 }; 121 122 #define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001 123 #define IB_SA_CANCEL 0x00000002 124 #define IB_SA_QUERY_OPA 0x00000004 125 126 struct ib_sa_service_query { 127 void (*callback)(int, struct ib_sa_service_rec *, void *); 128 void *context; 129 struct ib_sa_query sa_query; 130 }; 131 132 struct ib_sa_path_query { 133 void (*callback)(int, struct sa_path_rec *, void *); 134 void *context; 135 struct ib_sa_query sa_query; 136 struct sa_path_rec *conv_pr; 137 }; 138 139 struct ib_sa_guidinfo_query { 140 void (*callback)(int, struct ib_sa_guidinfo_rec *, void *); 141 void *context; 142 struct ib_sa_query sa_query; 143 }; 144 145 struct ib_sa_classport_info_query { 146 void (*callback)(void *); 147 void *context; 148 struct ib_sa_query sa_query; 149 }; 150 151 struct ib_sa_mcmember_query { 152 void (*callback)(int, struct ib_sa_mcmember_rec *, void *); 153 void *context; 154 struct ib_sa_query sa_query; 155 }; 156 157 static LIST_HEAD(ib_nl_request_list); 158 static DEFINE_SPINLOCK(ib_nl_request_lock); 159 static atomic_t ib_nl_sa_request_seq; 160 static struct workqueue_struct *ib_nl_wq; 161 static struct delayed_work ib_nl_timed_work; 162 static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = { 163 [LS_NLA_TYPE_PATH_RECORD] = {.type = NLA_BINARY, 164 .len = sizeof(struct ib_path_rec_data)}, 165 [LS_NLA_TYPE_TIMEOUT] = {.type = NLA_U32}, 166 [LS_NLA_TYPE_SERVICE_ID] = {.type = NLA_U64}, 167 [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY, 168 .len = sizeof(struct rdma_nla_ls_gid)}, 169 [LS_NLA_TYPE_SGID] = {.type = NLA_BINARY, 170 .len = sizeof(struct rdma_nla_ls_gid)}, 171 [LS_NLA_TYPE_TCLASS] = {.type = NLA_U8}, 172 [LS_NLA_TYPE_PKEY] = {.type = NLA_U16}, 173 [LS_NLA_TYPE_QOS_CLASS] = {.type = NLA_U16}, 174 }; 175 176 177 static int ib_sa_add_one(struct ib_device *device); 178 static void ib_sa_remove_one(struct ib_device *device, void *client_data); 179 180 static struct ib_client sa_client = { 181 .name = "sa", 182 .add = ib_sa_add_one, 183 .remove = ib_sa_remove_one 184 }; 185 186 static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ); 187 188 static DEFINE_SPINLOCK(tid_lock); 189 static u32 tid; 190 191 #define PATH_REC_FIELD(field) \ 192 .struct_offset_bytes = offsetof(struct sa_path_rec, field), \ 193 .struct_size_bytes = sizeof_field(struct sa_path_rec, field), \ 194 .field_name = "sa_path_rec:" #field 195 196 static const struct ib_field path_rec_table[] = { 197 { PATH_REC_FIELD(service_id), 198 .offset_words = 0, 199 .offset_bits = 0, 200 .size_bits = 64 }, 201 { PATH_REC_FIELD(dgid), 202 .offset_words = 2, 203 .offset_bits = 0, 204 .size_bits = 128 }, 205 { PATH_REC_FIELD(sgid), 206 .offset_words = 6, 207 .offset_bits = 0, 208 .size_bits = 128 }, 209 { PATH_REC_FIELD(ib.dlid), 210 .offset_words = 10, 211 .offset_bits = 0, 212 .size_bits = 16 }, 213 { PATH_REC_FIELD(ib.slid), 214 .offset_words = 10, 215 .offset_bits = 16, 216 .size_bits = 16 }, 217 { PATH_REC_FIELD(ib.raw_traffic), 218 .offset_words = 11, 219 .offset_bits = 0, 220 .size_bits = 1 }, 221 { RESERVED, 222 .offset_words = 11, 223 .offset_bits = 1, 224 .size_bits = 3 }, 225 { PATH_REC_FIELD(flow_label), 226 
.offset_words = 11, 227 .offset_bits = 4, 228 .size_bits = 20 }, 229 { PATH_REC_FIELD(hop_limit), 230 .offset_words = 11, 231 .offset_bits = 24, 232 .size_bits = 8 }, 233 { PATH_REC_FIELD(traffic_class), 234 .offset_words = 12, 235 .offset_bits = 0, 236 .size_bits = 8 }, 237 { PATH_REC_FIELD(reversible), 238 .offset_words = 12, 239 .offset_bits = 8, 240 .size_bits = 1 }, 241 { PATH_REC_FIELD(numb_path), 242 .offset_words = 12, 243 .offset_bits = 9, 244 .size_bits = 7 }, 245 { PATH_REC_FIELD(pkey), 246 .offset_words = 12, 247 .offset_bits = 16, 248 .size_bits = 16 }, 249 { PATH_REC_FIELD(qos_class), 250 .offset_words = 13, 251 .offset_bits = 0, 252 .size_bits = 12 }, 253 { PATH_REC_FIELD(sl), 254 .offset_words = 13, 255 .offset_bits = 12, 256 .size_bits = 4 }, 257 { PATH_REC_FIELD(mtu_selector), 258 .offset_words = 13, 259 .offset_bits = 16, 260 .size_bits = 2 }, 261 { PATH_REC_FIELD(mtu), 262 .offset_words = 13, 263 .offset_bits = 18, 264 .size_bits = 6 }, 265 { PATH_REC_FIELD(rate_selector), 266 .offset_words = 13, 267 .offset_bits = 24, 268 .size_bits = 2 }, 269 { PATH_REC_FIELD(rate), 270 .offset_words = 13, 271 .offset_bits = 26, 272 .size_bits = 6 }, 273 { PATH_REC_FIELD(packet_life_time_selector), 274 .offset_words = 14, 275 .offset_bits = 0, 276 .size_bits = 2 }, 277 { PATH_REC_FIELD(packet_life_time), 278 .offset_words = 14, 279 .offset_bits = 2, 280 .size_bits = 6 }, 281 { PATH_REC_FIELD(preference), 282 .offset_words = 14, 283 .offset_bits = 8, 284 .size_bits = 8 }, 285 { RESERVED, 286 .offset_words = 14, 287 .offset_bits = 16, 288 .size_bits = 48 }, 289 }; 290 291 #define OPA_PATH_REC_FIELD(field) \ 292 .struct_offset_bytes = \ 293 offsetof(struct sa_path_rec, field), \ 294 .struct_size_bytes = \ 295 sizeof_field(struct sa_path_rec, field), \ 296 .field_name = "sa_path_rec:" #field 297 298 static const struct ib_field opa_path_rec_table[] = { 299 { OPA_PATH_REC_FIELD(service_id), 300 .offset_words = 0, 301 .offset_bits = 0, 302 .size_bits = 64 }, 303 { OPA_PATH_REC_FIELD(dgid), 304 .offset_words = 2, 305 .offset_bits = 0, 306 .size_bits = 128 }, 307 { OPA_PATH_REC_FIELD(sgid), 308 .offset_words = 6, 309 .offset_bits = 0, 310 .size_bits = 128 }, 311 { OPA_PATH_REC_FIELD(opa.dlid), 312 .offset_words = 10, 313 .offset_bits = 0, 314 .size_bits = 32 }, 315 { OPA_PATH_REC_FIELD(opa.slid), 316 .offset_words = 11, 317 .offset_bits = 0, 318 .size_bits = 32 }, 319 { OPA_PATH_REC_FIELD(opa.raw_traffic), 320 .offset_words = 12, 321 .offset_bits = 0, 322 .size_bits = 1 }, 323 { RESERVED, 324 .offset_words = 12, 325 .offset_bits = 1, 326 .size_bits = 3 }, 327 { OPA_PATH_REC_FIELD(flow_label), 328 .offset_words = 12, 329 .offset_bits = 4, 330 .size_bits = 20 }, 331 { OPA_PATH_REC_FIELD(hop_limit), 332 .offset_words = 12, 333 .offset_bits = 24, 334 .size_bits = 8 }, 335 { OPA_PATH_REC_FIELD(traffic_class), 336 .offset_words = 13, 337 .offset_bits = 0, 338 .size_bits = 8 }, 339 { OPA_PATH_REC_FIELD(reversible), 340 .offset_words = 13, 341 .offset_bits = 8, 342 .size_bits = 1 }, 343 { OPA_PATH_REC_FIELD(numb_path), 344 .offset_words = 13, 345 .offset_bits = 9, 346 .size_bits = 7 }, 347 { OPA_PATH_REC_FIELD(pkey), 348 .offset_words = 13, 349 .offset_bits = 16, 350 .size_bits = 16 }, 351 { OPA_PATH_REC_FIELD(opa.l2_8B), 352 .offset_words = 14, 353 .offset_bits = 0, 354 .size_bits = 1 }, 355 { OPA_PATH_REC_FIELD(opa.l2_10B), 356 .offset_words = 14, 357 .offset_bits = 1, 358 .size_bits = 1 }, 359 { OPA_PATH_REC_FIELD(opa.l2_9B), 360 .offset_words = 14, 361 .offset_bits = 2, 362 .size_bits = 1 }, 363 
{ OPA_PATH_REC_FIELD(opa.l2_16B), 364 .offset_words = 14, 365 .offset_bits = 3, 366 .size_bits = 1 }, 367 { RESERVED, 368 .offset_words = 14, 369 .offset_bits = 4, 370 .size_bits = 2 }, 371 { OPA_PATH_REC_FIELD(opa.qos_type), 372 .offset_words = 14, 373 .offset_bits = 6, 374 .size_bits = 2 }, 375 { OPA_PATH_REC_FIELD(opa.qos_priority), 376 .offset_words = 14, 377 .offset_bits = 8, 378 .size_bits = 8 }, 379 { RESERVED, 380 .offset_words = 14, 381 .offset_bits = 16, 382 .size_bits = 3 }, 383 { OPA_PATH_REC_FIELD(sl), 384 .offset_words = 14, 385 .offset_bits = 19, 386 .size_bits = 5 }, 387 { RESERVED, 388 .offset_words = 14, 389 .offset_bits = 24, 390 .size_bits = 8 }, 391 { OPA_PATH_REC_FIELD(mtu_selector), 392 .offset_words = 15, 393 .offset_bits = 0, 394 .size_bits = 2 }, 395 { OPA_PATH_REC_FIELD(mtu), 396 .offset_words = 15, 397 .offset_bits = 2, 398 .size_bits = 6 }, 399 { OPA_PATH_REC_FIELD(rate_selector), 400 .offset_words = 15, 401 .offset_bits = 8, 402 .size_bits = 2 }, 403 { OPA_PATH_REC_FIELD(rate), 404 .offset_words = 15, 405 .offset_bits = 10, 406 .size_bits = 6 }, 407 { OPA_PATH_REC_FIELD(packet_life_time_selector), 408 .offset_words = 15, 409 .offset_bits = 16, 410 .size_bits = 2 }, 411 { OPA_PATH_REC_FIELD(packet_life_time), 412 .offset_words = 15, 413 .offset_bits = 18, 414 .size_bits = 6 }, 415 { OPA_PATH_REC_FIELD(preference), 416 .offset_words = 15, 417 .offset_bits = 24, 418 .size_bits = 8 }, 419 }; 420 421 #define MCMEMBER_REC_FIELD(field) \ 422 .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \ 423 .struct_size_bytes = sizeof_field(struct ib_sa_mcmember_rec, field), \ 424 .field_name = "sa_mcmember_rec:" #field 425 426 static const struct ib_field mcmember_rec_table[] = { 427 { MCMEMBER_REC_FIELD(mgid), 428 .offset_words = 0, 429 .offset_bits = 0, 430 .size_bits = 128 }, 431 { MCMEMBER_REC_FIELD(port_gid), 432 .offset_words = 4, 433 .offset_bits = 0, 434 .size_bits = 128 }, 435 { MCMEMBER_REC_FIELD(qkey), 436 .offset_words = 8, 437 .offset_bits = 0, 438 .size_bits = 32 }, 439 { MCMEMBER_REC_FIELD(mlid), 440 .offset_words = 9, 441 .offset_bits = 0, 442 .size_bits = 16 }, 443 { MCMEMBER_REC_FIELD(mtu_selector), 444 .offset_words = 9, 445 .offset_bits = 16, 446 .size_bits = 2 }, 447 { MCMEMBER_REC_FIELD(mtu), 448 .offset_words = 9, 449 .offset_bits = 18, 450 .size_bits = 6 }, 451 { MCMEMBER_REC_FIELD(traffic_class), 452 .offset_words = 9, 453 .offset_bits = 24, 454 .size_bits = 8 }, 455 { MCMEMBER_REC_FIELD(pkey), 456 .offset_words = 10, 457 .offset_bits = 0, 458 .size_bits = 16 }, 459 { MCMEMBER_REC_FIELD(rate_selector), 460 .offset_words = 10, 461 .offset_bits = 16, 462 .size_bits = 2 }, 463 { MCMEMBER_REC_FIELD(rate), 464 .offset_words = 10, 465 .offset_bits = 18, 466 .size_bits = 6 }, 467 { MCMEMBER_REC_FIELD(packet_life_time_selector), 468 .offset_words = 10, 469 .offset_bits = 24, 470 .size_bits = 2 }, 471 { MCMEMBER_REC_FIELD(packet_life_time), 472 .offset_words = 10, 473 .offset_bits = 26, 474 .size_bits = 6 }, 475 { MCMEMBER_REC_FIELD(sl), 476 .offset_words = 11, 477 .offset_bits = 0, 478 .size_bits = 4 }, 479 { MCMEMBER_REC_FIELD(flow_label), 480 .offset_words = 11, 481 .offset_bits = 4, 482 .size_bits = 20 }, 483 { MCMEMBER_REC_FIELD(hop_limit), 484 .offset_words = 11, 485 .offset_bits = 24, 486 .size_bits = 8 }, 487 { MCMEMBER_REC_FIELD(scope), 488 .offset_words = 12, 489 .offset_bits = 0, 490 .size_bits = 4 }, 491 { MCMEMBER_REC_FIELD(join_state), 492 .offset_words = 12, 493 .offset_bits = 4, 494 .size_bits = 4 }, 495 { 
MCMEMBER_REC_FIELD(proxy_join), 496 .offset_words = 12, 497 .offset_bits = 8, 498 .size_bits = 1 }, 499 { RESERVED, 500 .offset_words = 12, 501 .offset_bits = 9, 502 .size_bits = 23 }, 503 }; 504 505 #define SERVICE_REC_FIELD(field) \ 506 .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \ 507 .struct_size_bytes = sizeof_field(struct ib_sa_service_rec, field), \ 508 .field_name = "sa_service_rec:" #field 509 510 static const struct ib_field service_rec_table[] = { 511 { SERVICE_REC_FIELD(id), 512 .offset_words = 0, 513 .offset_bits = 0, 514 .size_bits = 64 }, 515 { SERVICE_REC_FIELD(gid), 516 .offset_words = 2, 517 .offset_bits = 0, 518 .size_bits = 128 }, 519 { SERVICE_REC_FIELD(pkey), 520 .offset_words = 6, 521 .offset_bits = 0, 522 .size_bits = 16 }, 523 { SERVICE_REC_FIELD(lease), 524 .offset_words = 7, 525 .offset_bits = 0, 526 .size_bits = 32 }, 527 { SERVICE_REC_FIELD(key), 528 .offset_words = 8, 529 .offset_bits = 0, 530 .size_bits = 128 }, 531 { SERVICE_REC_FIELD(name), 532 .offset_words = 12, 533 .offset_bits = 0, 534 .size_bits = 64*8 }, 535 { SERVICE_REC_FIELD(data8), 536 .offset_words = 28, 537 .offset_bits = 0, 538 .size_bits = 16*8 }, 539 { SERVICE_REC_FIELD(data16), 540 .offset_words = 32, 541 .offset_bits = 0, 542 .size_bits = 8*16 }, 543 { SERVICE_REC_FIELD(data32), 544 .offset_words = 36, 545 .offset_bits = 0, 546 .size_bits = 4*32 }, 547 { SERVICE_REC_FIELD(data64), 548 .offset_words = 40, 549 .offset_bits = 0, 550 .size_bits = 2*64 }, 551 }; 552 553 #define CLASSPORTINFO_REC_FIELD(field) \ 554 .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \ 555 .struct_size_bytes = sizeof_field(struct ib_class_port_info, field), \ 556 .field_name = "ib_class_port_info:" #field 557 558 static const struct ib_field ib_classport_info_rec_table[] = { 559 { CLASSPORTINFO_REC_FIELD(base_version), 560 .offset_words = 0, 561 .offset_bits = 0, 562 .size_bits = 8 }, 563 { CLASSPORTINFO_REC_FIELD(class_version), 564 .offset_words = 0, 565 .offset_bits = 8, 566 .size_bits = 8 }, 567 { CLASSPORTINFO_REC_FIELD(capability_mask), 568 .offset_words = 0, 569 .offset_bits = 16, 570 .size_bits = 16 }, 571 { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time), 572 .offset_words = 1, 573 .offset_bits = 0, 574 .size_bits = 32 }, 575 { CLASSPORTINFO_REC_FIELD(redirect_gid), 576 .offset_words = 2, 577 .offset_bits = 0, 578 .size_bits = 128 }, 579 { CLASSPORTINFO_REC_FIELD(redirect_tcslfl), 580 .offset_words = 6, 581 .offset_bits = 0, 582 .size_bits = 32 }, 583 { CLASSPORTINFO_REC_FIELD(redirect_lid), 584 .offset_words = 7, 585 .offset_bits = 0, 586 .size_bits = 16 }, 587 { CLASSPORTINFO_REC_FIELD(redirect_pkey), 588 .offset_words = 7, 589 .offset_bits = 16, 590 .size_bits = 16 }, 591 592 { CLASSPORTINFO_REC_FIELD(redirect_qp), 593 .offset_words = 8, 594 .offset_bits = 0, 595 .size_bits = 32 }, 596 { CLASSPORTINFO_REC_FIELD(redirect_qkey), 597 .offset_words = 9, 598 .offset_bits = 0, 599 .size_bits = 32 }, 600 601 { CLASSPORTINFO_REC_FIELD(trap_gid), 602 .offset_words = 10, 603 .offset_bits = 0, 604 .size_bits = 128 }, 605 { CLASSPORTINFO_REC_FIELD(trap_tcslfl), 606 .offset_words = 14, 607 .offset_bits = 0, 608 .size_bits = 32 }, 609 610 { CLASSPORTINFO_REC_FIELD(trap_lid), 611 .offset_words = 15, 612 .offset_bits = 0, 613 .size_bits = 16 }, 614 { CLASSPORTINFO_REC_FIELD(trap_pkey), 615 .offset_words = 15, 616 .offset_bits = 16, 617 .size_bits = 16 }, 618 619 { CLASSPORTINFO_REC_FIELD(trap_hlqp), 620 .offset_words = 16, 621 .offset_bits = 0, 622 .size_bits = 32 }, 623 { 
CLASSPORTINFO_REC_FIELD(trap_qkey), 624 .offset_words = 17, 625 .offset_bits = 0, 626 .size_bits = 32 }, 627 }; 628 629 #define OPA_CLASSPORTINFO_REC_FIELD(field) \ 630 .struct_offset_bytes =\ 631 offsetof(struct opa_class_port_info, field), \ 632 .struct_size_bytes = \ 633 sizeof_field(struct opa_class_port_info, field), \ 634 .field_name = "opa_class_port_info:" #field 635 636 static const struct ib_field opa_classport_info_rec_table[] = { 637 { OPA_CLASSPORTINFO_REC_FIELD(base_version), 638 .offset_words = 0, 639 .offset_bits = 0, 640 .size_bits = 8 }, 641 { OPA_CLASSPORTINFO_REC_FIELD(class_version), 642 .offset_words = 0, 643 .offset_bits = 8, 644 .size_bits = 8 }, 645 { OPA_CLASSPORTINFO_REC_FIELD(cap_mask), 646 .offset_words = 0, 647 .offset_bits = 16, 648 .size_bits = 16 }, 649 { OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time), 650 .offset_words = 1, 651 .offset_bits = 0, 652 .size_bits = 32 }, 653 { OPA_CLASSPORTINFO_REC_FIELD(redirect_gid), 654 .offset_words = 2, 655 .offset_bits = 0, 656 .size_bits = 128 }, 657 { OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl), 658 .offset_words = 6, 659 .offset_bits = 0, 660 .size_bits = 32 }, 661 { OPA_CLASSPORTINFO_REC_FIELD(redirect_lid), 662 .offset_words = 7, 663 .offset_bits = 0, 664 .size_bits = 32 }, 665 { OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp), 666 .offset_words = 8, 667 .offset_bits = 0, 668 .size_bits = 32 }, 669 { OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey), 670 .offset_words = 9, 671 .offset_bits = 0, 672 .size_bits = 32 }, 673 { OPA_CLASSPORTINFO_REC_FIELD(trap_gid), 674 .offset_words = 10, 675 .offset_bits = 0, 676 .size_bits = 128 }, 677 { OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl), 678 .offset_words = 14, 679 .offset_bits = 0, 680 .size_bits = 32 }, 681 { OPA_CLASSPORTINFO_REC_FIELD(trap_lid), 682 .offset_words = 15, 683 .offset_bits = 0, 684 .size_bits = 32 }, 685 { OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp), 686 .offset_words = 16, 687 .offset_bits = 0, 688 .size_bits = 32 }, 689 { OPA_CLASSPORTINFO_REC_FIELD(trap_qkey), 690 .offset_words = 17, 691 .offset_bits = 0, 692 .size_bits = 32 }, 693 { OPA_CLASSPORTINFO_REC_FIELD(trap_pkey), 694 .offset_words = 18, 695 .offset_bits = 0, 696 .size_bits = 16 }, 697 { OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey), 698 .offset_words = 18, 699 .offset_bits = 16, 700 .size_bits = 16 }, 701 { OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd), 702 .offset_words = 19, 703 .offset_bits = 0, 704 .size_bits = 8 }, 705 { RESERVED, 706 .offset_words = 19, 707 .offset_bits = 8, 708 .size_bits = 24 }, 709 }; 710 711 #define GUIDINFO_REC_FIELD(field) \ 712 .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \ 713 .struct_size_bytes = sizeof_field(struct ib_sa_guidinfo_rec, field), \ 714 .field_name = "sa_guidinfo_rec:" #field 715 716 static const struct ib_field guidinfo_rec_table[] = { 717 { GUIDINFO_REC_FIELD(lid), 718 .offset_words = 0, 719 .offset_bits = 0, 720 .size_bits = 16 }, 721 { GUIDINFO_REC_FIELD(block_num), 722 .offset_words = 0, 723 .offset_bits = 16, 724 .size_bits = 8 }, 725 { GUIDINFO_REC_FIELD(res1), 726 .offset_words = 0, 727 .offset_bits = 24, 728 .size_bits = 8 }, 729 { GUIDINFO_REC_FIELD(res2), 730 .offset_words = 1, 731 .offset_bits = 0, 732 .size_bits = 32 }, 733 { GUIDINFO_REC_FIELD(guid_info_list), 734 .offset_words = 2, 735 .offset_bits = 0, 736 .size_bits = 512 }, 737 }; 738 739 static inline void ib_sa_disable_local_svc(struct ib_sa_query *query) 740 { 741 query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE; 742 } 743 744 static inline int ib_sa_query_cancelled(struct ib_sa_query 
*query) 745 { 746 return (query->flags & IB_SA_CANCEL); 747 } 748 749 static void ib_nl_set_path_rec_attrs(struct sk_buff *skb, 750 struct ib_sa_query *query) 751 { 752 struct sa_path_rec *sa_rec = query->mad_buf->context[1]; 753 struct ib_sa_mad *mad = query->mad_buf->mad; 754 ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask; 755 u16 val16; 756 u64 val64; 757 struct rdma_ls_resolve_header *header; 758 759 query->mad_buf->context[1] = NULL; 760 761 /* Construct the family header first */ 762 header = skb_put(skb, NLMSG_ALIGN(sizeof(*header))); 763 memcpy(header->device_name, dev_name(&query->port->agent->device->dev), 764 LS_DEVICE_NAME_MAX); 765 header->port_num = query->port->port_num; 766 767 if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) && 768 sa_rec->reversible != 0) 769 query->path_use = LS_RESOLVE_PATH_USE_GMP; 770 else 771 query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL; 772 header->path_use = query->path_use; 773 774 /* Now build the attributes */ 775 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) { 776 val64 = be64_to_cpu(sa_rec->service_id); 777 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID, 778 sizeof(val64), &val64); 779 } 780 if (comp_mask & IB_SA_PATH_REC_DGID) 781 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID, 782 sizeof(sa_rec->dgid), &sa_rec->dgid); 783 if (comp_mask & IB_SA_PATH_REC_SGID) 784 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID, 785 sizeof(sa_rec->sgid), &sa_rec->sgid); 786 if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS) 787 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS, 788 sizeof(sa_rec->traffic_class), &sa_rec->traffic_class); 789 790 if (comp_mask & IB_SA_PATH_REC_PKEY) { 791 val16 = be16_to_cpu(sa_rec->pkey); 792 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY, 793 sizeof(val16), &val16); 794 } 795 if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) { 796 val16 = be16_to_cpu(sa_rec->qos_class); 797 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS, 798 sizeof(val16), &val16); 799 } 800 } 801 802 static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask) 803 { 804 int len = 0; 805 806 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) 807 len += nla_total_size(sizeof(u64)); 808 if (comp_mask & IB_SA_PATH_REC_DGID) 809 len += nla_total_size(sizeof(struct rdma_nla_ls_gid)); 810 if (comp_mask & IB_SA_PATH_REC_SGID) 811 len += nla_total_size(sizeof(struct rdma_nla_ls_gid)); 812 if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS) 813 len += nla_total_size(sizeof(u8)); 814 if (comp_mask & IB_SA_PATH_REC_PKEY) 815 len += nla_total_size(sizeof(u16)); 816 if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) 817 len += nla_total_size(sizeof(u16)); 818 819 /* 820 * Make sure that at least some of the required comp_mask bits are 821 * set. 
822 */ 823 if (WARN_ON(len == 0)) 824 return len; 825 826 /* Add the family header */ 827 len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header)); 828 829 return len; 830 } 831 832 static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) 833 { 834 struct sk_buff *skb = NULL; 835 struct nlmsghdr *nlh; 836 void *data; 837 struct ib_sa_mad *mad; 838 int len; 839 840 mad = query->mad_buf->mad; 841 len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask); 842 if (len <= 0) 843 return -EMSGSIZE; 844 845 skb = nlmsg_new(len, gfp_mask); 846 if (!skb) 847 return -ENOMEM; 848 849 /* Put nlmsg header only for now */ 850 data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS, 851 RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST); 852 if (!data) { 853 nlmsg_free(skb); 854 return -EMSGSIZE; 855 } 856 857 /* Add attributes */ 858 ib_nl_set_path_rec_attrs(skb, query); 859 860 /* Repair the nlmsg header length */ 861 nlmsg_end(skb, nlh); 862 863 return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask); 864 } 865 866 static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) 867 { 868 unsigned long flags; 869 unsigned long delay; 870 int ret; 871 872 INIT_LIST_HEAD(&query->list); 873 query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); 874 875 /* Put the request on the list first.*/ 876 spin_lock_irqsave(&ib_nl_request_lock, flags); 877 delay = msecs_to_jiffies(sa_local_svc_timeout_ms); 878 query->timeout = delay + jiffies; 879 list_add_tail(&query->list, &ib_nl_request_list); 880 /* Start the timeout if this is the only request */ 881 if (ib_nl_request_list.next == &query->list) 882 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); 883 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 884 885 ret = ib_nl_send_msg(query, gfp_mask); 886 if (ret) { 887 ret = -EIO; 888 /* Remove the request */ 889 spin_lock_irqsave(&ib_nl_request_lock, flags); 890 list_del(&query->list); 891 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 892 } 893 894 return ret; 895 } 896 897 static int ib_nl_cancel_request(struct ib_sa_query *query) 898 { 899 unsigned long flags; 900 struct ib_sa_query *wait_query; 901 int found = 0; 902 903 spin_lock_irqsave(&ib_nl_request_lock, flags); 904 list_for_each_entry(wait_query, &ib_nl_request_list, list) { 905 /* Let the timeout to take care of the callback */ 906 if (query == wait_query) { 907 query->flags |= IB_SA_CANCEL; 908 query->timeout = jiffies; 909 list_move(&query->list, &ib_nl_request_list); 910 found = 1; 911 mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1); 912 break; 913 } 914 } 915 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 916 917 return found; 918 } 919 920 static void send_handler(struct ib_mad_agent *agent, 921 struct ib_mad_send_wc *mad_send_wc); 922 923 static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query, 924 const struct nlmsghdr *nlh) 925 { 926 struct ib_mad_send_wc mad_send_wc; 927 struct ib_sa_mad *mad = NULL; 928 const struct nlattr *head, *curr; 929 struct ib_path_rec_data *rec; 930 int len, rem; 931 u32 mask = 0; 932 int status = -EIO; 933 934 if (query->callback) { 935 head = (const struct nlattr *) nlmsg_data(nlh); 936 len = nlmsg_len(nlh); 937 switch (query->path_use) { 938 case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL: 939 mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND; 940 break; 941 942 case LS_RESOLVE_PATH_USE_ALL: 943 case LS_RESOLVE_PATH_USE_GMP: 944 default: 945 mask = IB_PATH_PRIMARY | IB_PATH_GMP | 946 IB_PATH_BIDIRECTIONAL; 947 break; 948 } 949 nla_for_each_attr(curr, head, len, rem) { 950 
if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) { 951 rec = nla_data(curr); 952 /* 953 * Get the first one. In the future, we may 954 * need to get up to 6 pathrecords. 955 */ 956 if ((rec->flags & mask) == mask) { 957 mad = query->mad_buf->mad; 958 mad->mad_hdr.method |= 959 IB_MGMT_METHOD_RESP; 960 memcpy(mad->data, rec->path_rec, 961 sizeof(rec->path_rec)); 962 status = 0; 963 break; 964 } 965 } 966 } 967 query->callback(query, status, mad); 968 } 969 970 mad_send_wc.send_buf = query->mad_buf; 971 mad_send_wc.status = IB_WC_SUCCESS; 972 send_handler(query->mad_buf->mad_agent, &mad_send_wc); 973 } 974 975 static void ib_nl_request_timeout(struct work_struct *work) 976 { 977 unsigned long flags; 978 struct ib_sa_query *query; 979 unsigned long delay; 980 struct ib_mad_send_wc mad_send_wc; 981 int ret; 982 983 spin_lock_irqsave(&ib_nl_request_lock, flags); 984 while (!list_empty(&ib_nl_request_list)) { 985 query = list_entry(ib_nl_request_list.next, 986 struct ib_sa_query, list); 987 988 if (time_after(query->timeout, jiffies)) { 989 delay = query->timeout - jiffies; 990 if ((long)delay <= 0) 991 delay = 1; 992 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); 993 break; 994 } 995 996 list_del(&query->list); 997 ib_sa_disable_local_svc(query); 998 /* Hold the lock to protect against query cancellation */ 999 if (ib_sa_query_cancelled(query)) 1000 ret = -1; 1001 else 1002 ret = ib_post_send_mad(query->mad_buf, NULL); 1003 if (ret) { 1004 mad_send_wc.send_buf = query->mad_buf; 1005 mad_send_wc.status = IB_WC_WR_FLUSH_ERR; 1006 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 1007 send_handler(query->port->agent, &mad_send_wc); 1008 spin_lock_irqsave(&ib_nl_request_lock, flags); 1009 } 1010 } 1011 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 1012 } 1013 1014 int ib_nl_handle_set_timeout(struct sk_buff *skb, 1015 struct nlmsghdr *nlh, 1016 struct netlink_ext_ack *extack) 1017 { 1018 int timeout, delta, abs_delta; 1019 const struct nlattr *attr; 1020 unsigned long flags; 1021 struct ib_sa_query *query; 1022 long delay = 0; 1023 struct nlattr *tb[LS_NLA_TYPE_MAX]; 1024 int ret; 1025 1026 if (!(nlh->nlmsg_flags & NLM_F_REQUEST) || 1027 !(NETLINK_CB(skb).sk)) 1028 return -EPERM; 1029 1030 ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), 1031 nlmsg_len(nlh), ib_nl_policy, NULL); 1032 attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT]; 1033 if (ret || !attr) 1034 goto settimeout_out; 1035 1036 timeout = *(int *) nla_data(attr); 1037 if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN) 1038 timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN; 1039 if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX) 1040 timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX; 1041 1042 delta = timeout - sa_local_svc_timeout_ms; 1043 if (delta < 0) 1044 abs_delta = -delta; 1045 else 1046 abs_delta = delta; 1047 1048 if (delta != 0) { 1049 spin_lock_irqsave(&ib_nl_request_lock, flags); 1050 sa_local_svc_timeout_ms = timeout; 1051 list_for_each_entry(query, &ib_nl_request_list, list) { 1052 if (delta < 0 && abs_delta > query->timeout) 1053 query->timeout = 0; 1054 else 1055 query->timeout += delta; 1056 1057 /* Get the new delay from the first entry */ 1058 if (!delay) { 1059 delay = query->timeout - jiffies; 1060 if (delay <= 0) 1061 delay = 1; 1062 } 1063 } 1064 if (delay) 1065 mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1066 (unsigned long)delay); 1067 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 1068 } 1069 1070 settimeout_out: 1071 return 0; 1072 } 1073 1074 static inline int ib_nl_is_good_resolve_resp(const struct 
nlmsghdr *nlh) 1075 { 1076 struct nlattr *tb[LS_NLA_TYPE_MAX]; 1077 int ret; 1078 1079 if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR) 1080 return 0; 1081 1082 ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), 1083 nlmsg_len(nlh), ib_nl_policy, NULL); 1084 if (ret) 1085 return 0; 1086 1087 return 1; 1088 } 1089 1090 int ib_nl_handle_resolve_resp(struct sk_buff *skb, 1091 struct nlmsghdr *nlh, 1092 struct netlink_ext_ack *extack) 1093 { 1094 unsigned long flags; 1095 struct ib_sa_query *query; 1096 struct ib_mad_send_buf *send_buf; 1097 struct ib_mad_send_wc mad_send_wc; 1098 int found = 0; 1099 int ret; 1100 1101 if ((nlh->nlmsg_flags & NLM_F_REQUEST) || 1102 !(NETLINK_CB(skb).sk)) 1103 return -EPERM; 1104 1105 spin_lock_irqsave(&ib_nl_request_lock, flags); 1106 list_for_each_entry(query, &ib_nl_request_list, list) { 1107 /* 1108 * If the query is cancelled, let the timeout routine 1109 * take care of it. 1110 */ 1111 if (nlh->nlmsg_seq == query->seq) { 1112 found = !ib_sa_query_cancelled(query); 1113 if (found) 1114 list_del(&query->list); 1115 break; 1116 } 1117 } 1118 1119 if (!found) { 1120 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 1121 goto resp_out; 1122 } 1123 1124 send_buf = query->mad_buf; 1125 1126 if (!ib_nl_is_good_resolve_resp(nlh)) { 1127 /* if the result is a failure, send out the packet via IB */ 1128 ib_sa_disable_local_svc(query); 1129 ret = ib_post_send_mad(query->mad_buf, NULL); 1130 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 1131 if (ret) { 1132 mad_send_wc.send_buf = send_buf; 1133 mad_send_wc.status = IB_WC_GENERAL_ERR; 1134 send_handler(query->port->agent, &mad_send_wc); 1135 } 1136 } else { 1137 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 1138 ib_nl_process_good_resolve_rsp(query, nlh); 1139 } 1140 1141 resp_out: 1142 return 0; 1143 } 1144 1145 static void free_sm_ah(struct kref *kref) 1146 { 1147 struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); 1148 1149 rdma_destroy_ah(sm_ah->ah, 0); 1150 kfree(sm_ah); 1151 } 1152 1153 void ib_sa_register_client(struct ib_sa_client *client) 1154 { 1155 atomic_set(&client->users, 1); 1156 init_completion(&client->comp); 1157 } 1158 EXPORT_SYMBOL(ib_sa_register_client); 1159 1160 void ib_sa_unregister_client(struct ib_sa_client *client) 1161 { 1162 ib_sa_client_put(client); 1163 wait_for_completion(&client->comp); 1164 } 1165 EXPORT_SYMBOL(ib_sa_unregister_client); 1166 1167 /** 1168 * ib_sa_cancel_query - try to cancel an SA query 1169 * @id:ID of query to cancel 1170 * @query:query pointer to cancel 1171 * 1172 * Try to cancel an SA query. If the id and query don't match up or 1173 * the query has already completed, nothing is done. Otherwise the 1174 * query is canceled and will complete with a status of -EINTR. 1175 */ 1176 void ib_sa_cancel_query(int id, struct ib_sa_query *query) 1177 { 1178 unsigned long flags; 1179 struct ib_mad_agent *agent; 1180 struct ib_mad_send_buf *mad_buf; 1181 1182 xa_lock_irqsave(&queries, flags); 1183 if (xa_load(&queries, id) != query) { 1184 xa_unlock_irqrestore(&queries, flags); 1185 return; 1186 } 1187 agent = query->port->agent; 1188 mad_buf = query->mad_buf; 1189 xa_unlock_irqrestore(&queries, flags); 1190 1191 /* 1192 * If the query is still on the netlink request list, schedule 1193 * it to be cancelled by the timeout routine. Otherwise, it has been 1194 * sent to the MAD layer and has to be cancelled from there. 
1195 */ 1196 if (!ib_nl_cancel_request(query)) 1197 ib_cancel_mad(agent, mad_buf); 1198 } 1199 EXPORT_SYMBOL(ib_sa_cancel_query); 1200 1201 static u8 get_src_path_mask(struct ib_device *device, u8 port_num) 1202 { 1203 struct ib_sa_device *sa_dev; 1204 struct ib_sa_port *port; 1205 unsigned long flags; 1206 u8 src_path_mask; 1207 1208 sa_dev = ib_get_client_data(device, &sa_client); 1209 if (!sa_dev) 1210 return 0x7f; 1211 1212 port = &sa_dev->port[port_num - sa_dev->start_port]; 1213 spin_lock_irqsave(&port->ah_lock, flags); 1214 src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f; 1215 spin_unlock_irqrestore(&port->ah_lock, flags); 1216 1217 return src_path_mask; 1218 } 1219 1220 static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num, 1221 struct sa_path_rec *rec, 1222 struct rdma_ah_attr *ah_attr, 1223 const struct ib_gid_attr *gid_attr) 1224 { 1225 enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec); 1226 1227 if (!gid_attr) { 1228 gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type, 1229 port_num, NULL); 1230 if (IS_ERR(gid_attr)) 1231 return PTR_ERR(gid_attr); 1232 } else 1233 rdma_hold_gid_attr(gid_attr); 1234 1235 rdma_move_grh_sgid_attr(ah_attr, &rec->dgid, 1236 be32_to_cpu(rec->flow_label), 1237 rec->hop_limit, rec->traffic_class, 1238 gid_attr); 1239 return 0; 1240 } 1241 1242 /** 1243 * ib_init_ah_attr_from_path - Initialize address handle attributes based on 1244 * an SA path record. 1245 * @device: Device associated ah attributes initialization. 1246 * @port_num: Port on the specified device. 1247 * @rec: path record entry to use for ah attributes initialization. 1248 * @ah_attr: address handle attributes to initialization from path record. 1249 * @gid_attr: SGID attribute to consider during initialization. 1250 * 1251 * When ib_init_ah_attr_from_path() returns success, 1252 * (a) for IB link layer it optionally contains a reference to SGID attribute 1253 * when GRH is present for IB link layer. 1254 * (b) for RoCE link layer it contains a reference to SGID attribute. 1255 * User must invoke rdma_destroy_ah_attr() to release reference to SGID 1256 * attributes which are initialized using ib_init_ah_attr_from_path(). 
1257 */ 1258 int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num, 1259 struct sa_path_rec *rec, 1260 struct rdma_ah_attr *ah_attr, 1261 const struct ib_gid_attr *gid_attr) 1262 { 1263 int ret = 0; 1264 1265 memset(ah_attr, 0, sizeof(*ah_attr)); 1266 ah_attr->type = rdma_ah_find_type(device, port_num); 1267 rdma_ah_set_sl(ah_attr, rec->sl); 1268 rdma_ah_set_port_num(ah_attr, port_num); 1269 rdma_ah_set_static_rate(ah_attr, rec->rate); 1270 1271 if (sa_path_is_roce(rec)) { 1272 ret = roce_resolve_route_from_path(rec, gid_attr); 1273 if (ret) 1274 return ret; 1275 1276 memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN); 1277 } else { 1278 rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec))); 1279 if (sa_path_is_opa(rec) && 1280 rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE)) 1281 rdma_ah_set_make_grd(ah_attr, true); 1282 1283 rdma_ah_set_path_bits(ah_attr, 1284 be32_to_cpu(sa_path_get_slid(rec)) & 1285 get_src_path_mask(device, port_num)); 1286 } 1287 1288 if (rec->hop_limit > 0 || sa_path_is_roce(rec)) 1289 ret = init_ah_attr_grh_fields(device, port_num, 1290 rec, ah_attr, gid_attr); 1291 return ret; 1292 } 1293 EXPORT_SYMBOL(ib_init_ah_attr_from_path); 1294 1295 static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask) 1296 { 1297 struct rdma_ah_attr ah_attr; 1298 unsigned long flags; 1299 1300 spin_lock_irqsave(&query->port->ah_lock, flags); 1301 if (!query->port->sm_ah) { 1302 spin_unlock_irqrestore(&query->port->ah_lock, flags); 1303 return -EAGAIN; 1304 } 1305 kref_get(&query->port->sm_ah->ref); 1306 query->sm_ah = query->port->sm_ah; 1307 spin_unlock_irqrestore(&query->port->ah_lock, flags); 1308 1309 /* 1310 * Always check if sm_ah has valid dlid assigned, 1311 * before querying for class port info 1312 */ 1313 if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) || 1314 !rdma_is_valid_unicast_lid(&ah_attr)) { 1315 kref_put(&query->sm_ah->ref, free_sm_ah); 1316 return -EAGAIN; 1317 } 1318 query->mad_buf = ib_create_send_mad(query->port->agent, 1, 1319 query->sm_ah->pkey_index, 1320 0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA, 1321 gfp_mask, 1322 ((query->flags & IB_SA_QUERY_OPA) ? 
1323 OPA_MGMT_BASE_VERSION : 1324 IB_MGMT_BASE_VERSION)); 1325 if (IS_ERR(query->mad_buf)) { 1326 kref_put(&query->sm_ah->ref, free_sm_ah); 1327 return -ENOMEM; 1328 } 1329 1330 query->mad_buf->ah = query->sm_ah->ah; 1331 1332 return 0; 1333 } 1334 1335 static void free_mad(struct ib_sa_query *query) 1336 { 1337 ib_free_send_mad(query->mad_buf); 1338 kref_put(&query->sm_ah->ref, free_sm_ah); 1339 } 1340 1341 static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent) 1342 { 1343 struct ib_sa_mad *mad = query->mad_buf->mad; 1344 unsigned long flags; 1345 1346 memset(mad, 0, sizeof *mad); 1347 1348 if (query->flags & IB_SA_QUERY_OPA) { 1349 mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION; 1350 mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION; 1351 } else { 1352 mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION; 1353 mad->mad_hdr.class_version = IB_SA_CLASS_VERSION; 1354 } 1355 mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM; 1356 spin_lock_irqsave(&tid_lock, flags); 1357 mad->mad_hdr.tid = 1358 cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++); 1359 spin_unlock_irqrestore(&tid_lock, flags); 1360 } 1361 1362 static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms, 1363 gfp_t gfp_mask) 1364 { 1365 unsigned long flags; 1366 int ret, id; 1367 1368 xa_lock_irqsave(&queries, flags); 1369 ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask); 1370 xa_unlock_irqrestore(&queries, flags); 1371 if (ret < 0) 1372 return ret; 1373 1374 query->mad_buf->timeout_ms = timeout_ms; 1375 query->mad_buf->context[0] = query; 1376 query->id = id; 1377 1378 if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) && 1379 (!(query->flags & IB_SA_QUERY_OPA))) { 1380 if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) { 1381 if (!ib_nl_make_request(query, gfp_mask)) 1382 return id; 1383 } 1384 ib_sa_disable_local_svc(query); 1385 } 1386 1387 ret = ib_post_send_mad(query->mad_buf, NULL); 1388 if (ret) { 1389 xa_lock_irqsave(&queries, flags); 1390 __xa_erase(&queries, id); 1391 xa_unlock_irqrestore(&queries, flags); 1392 } 1393 1394 /* 1395 * It's not safe to dereference query any more, because the 1396 * send may already have completed and freed the query in 1397 * another context. 1398 */ 1399 return ret ? ret : id; 1400 } 1401 1402 void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec) 1403 { 1404 ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec); 1405 } 1406 EXPORT_SYMBOL(ib_sa_unpack_path); 1407 1408 void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute) 1409 { 1410 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute); 1411 } 1412 EXPORT_SYMBOL(ib_sa_pack_path); 1413 1414 static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client, 1415 struct ib_sa_device *sa_dev, 1416 u8 port_num) 1417 { 1418 struct ib_sa_port *port; 1419 unsigned long flags; 1420 bool ret = false; 1421 1422 port = &sa_dev->port[port_num - sa_dev->start_port]; 1423 spin_lock_irqsave(&port->classport_lock, flags); 1424 if (!port->classport_info.valid) 1425 goto ret; 1426 1427 if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA) 1428 ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) & 1429 OPA_CLASS_PORT_INFO_PR_SUPPORT; 1430 ret: 1431 spin_unlock_irqrestore(&port->classport_lock, flags); 1432 return ret; 1433 } 1434 1435 enum opa_pr_supported { 1436 PR_NOT_SUPPORTED, 1437 PR_OPA_SUPPORTED, 1438 PR_IB_SUPPORTED 1439 }; 1440 1441 /** 1442 * Check if current PR query can be an OPA query. 
1443 * Retuns PR_NOT_SUPPORTED if a path record query is not 1444 * possible, PR_OPA_SUPPORTED if an OPA path record query 1445 * is possible and PR_IB_SUPPORTED if an IB path record 1446 * query is possible. 1447 */ 1448 static int opa_pr_query_possible(struct ib_sa_client *client, 1449 struct ib_sa_device *sa_dev, 1450 struct ib_device *device, u8 port_num, 1451 struct sa_path_rec *rec) 1452 { 1453 struct ib_port_attr port_attr; 1454 1455 if (ib_query_port(device, port_num, &port_attr)) 1456 return PR_NOT_SUPPORTED; 1457 1458 if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num)) 1459 return PR_OPA_SUPPORTED; 1460 1461 if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) 1462 return PR_NOT_SUPPORTED; 1463 else 1464 return PR_IB_SUPPORTED; 1465 } 1466 1467 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query, 1468 int status, 1469 struct ib_sa_mad *mad) 1470 { 1471 struct ib_sa_path_query *query = 1472 container_of(sa_query, struct ib_sa_path_query, sa_query); 1473 1474 if (mad) { 1475 struct sa_path_rec rec; 1476 1477 if (sa_query->flags & IB_SA_QUERY_OPA) { 1478 ib_unpack(opa_path_rec_table, 1479 ARRAY_SIZE(opa_path_rec_table), 1480 mad->data, &rec); 1481 rec.rec_type = SA_PATH_REC_TYPE_OPA; 1482 query->callback(status, &rec, query->context); 1483 } else { 1484 ib_unpack(path_rec_table, 1485 ARRAY_SIZE(path_rec_table), 1486 mad->data, &rec); 1487 rec.rec_type = SA_PATH_REC_TYPE_IB; 1488 sa_path_set_dmac_zero(&rec); 1489 1490 if (query->conv_pr) { 1491 struct sa_path_rec opa; 1492 1493 memset(&opa, 0, sizeof(struct sa_path_rec)); 1494 sa_convert_path_ib_to_opa(&opa, &rec); 1495 query->callback(status, &opa, query->context); 1496 } else { 1497 query->callback(status, &rec, query->context); 1498 } 1499 } 1500 } else 1501 query->callback(status, NULL, query->context); 1502 } 1503 1504 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) 1505 { 1506 struct ib_sa_path_query *query = 1507 container_of(sa_query, struct ib_sa_path_query, sa_query); 1508 1509 kfree(query->conv_pr); 1510 kfree(query); 1511 } 1512 1513 /** 1514 * ib_sa_path_rec_get - Start a Path get query 1515 * @client:SA client 1516 * @device:device to send query on 1517 * @port_num: port number to send query on 1518 * @rec:Path Record to send in query 1519 * @comp_mask:component mask to send in query 1520 * @timeout_ms:time to wait for response 1521 * @gfp_mask:GFP mask to use for internal allocations 1522 * @callback:function called when query completes, times out or is 1523 * canceled 1524 * @context:opaque user context passed to callback 1525 * @sa_query:query context, used to cancel query 1526 * 1527 * Send a Path Record Get query to the SA to look up a path. The 1528 * callback function will be called when the query completes (or 1529 * fails); status is 0 for a successful response, -EINTR if the query 1530 * is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error 1531 * occurred sending the query. The resp parameter of the callback is 1532 * only valid if status is 0. 1533 * 1534 * If the return value of ib_sa_path_rec_get() is negative, it is an 1535 * error code. Otherwise it is a query ID that can be used to cancel 1536 * the query. 
1537 */ 1538 int ib_sa_path_rec_get(struct ib_sa_client *client, 1539 struct ib_device *device, u8 port_num, 1540 struct sa_path_rec *rec, 1541 ib_sa_comp_mask comp_mask, 1542 unsigned long timeout_ms, gfp_t gfp_mask, 1543 void (*callback)(int status, 1544 struct sa_path_rec *resp, 1545 void *context), 1546 void *context, 1547 struct ib_sa_query **sa_query) 1548 { 1549 struct ib_sa_path_query *query; 1550 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); 1551 struct ib_sa_port *port; 1552 struct ib_mad_agent *agent; 1553 struct ib_sa_mad *mad; 1554 enum opa_pr_supported status; 1555 int ret; 1556 1557 if (!sa_dev) 1558 return -ENODEV; 1559 1560 if ((rec->rec_type != SA_PATH_REC_TYPE_IB) && 1561 (rec->rec_type != SA_PATH_REC_TYPE_OPA)) 1562 return -EINVAL; 1563 1564 port = &sa_dev->port[port_num - sa_dev->start_port]; 1565 agent = port->agent; 1566 1567 query = kzalloc(sizeof(*query), gfp_mask); 1568 if (!query) 1569 return -ENOMEM; 1570 1571 query->sa_query.port = port; 1572 if (rec->rec_type == SA_PATH_REC_TYPE_OPA) { 1573 status = opa_pr_query_possible(client, sa_dev, device, port_num, 1574 rec); 1575 if (status == PR_NOT_SUPPORTED) { 1576 ret = -EINVAL; 1577 goto err1; 1578 } else if (status == PR_OPA_SUPPORTED) { 1579 query->sa_query.flags |= IB_SA_QUERY_OPA; 1580 } else { 1581 query->conv_pr = 1582 kmalloc(sizeof(*query->conv_pr), gfp_mask); 1583 if (!query->conv_pr) { 1584 ret = -ENOMEM; 1585 goto err1; 1586 } 1587 } 1588 } 1589 1590 ret = alloc_mad(&query->sa_query, gfp_mask); 1591 if (ret) 1592 goto err2; 1593 1594 ib_sa_client_get(client); 1595 query->sa_query.client = client; 1596 query->callback = callback; 1597 query->context = context; 1598 1599 mad = query->sa_query.mad_buf->mad; 1600 init_mad(&query->sa_query, agent); 1601 1602 query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL; 1603 query->sa_query.release = ib_sa_path_rec_release; 1604 mad->mad_hdr.method = IB_MGMT_METHOD_GET; 1605 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC); 1606 mad->sa_hdr.comp_mask = comp_mask; 1607 1608 if (query->sa_query.flags & IB_SA_QUERY_OPA) { 1609 ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table), 1610 rec, mad->data); 1611 } else if (query->conv_pr) { 1612 sa_convert_path_opa_to_ib(query->conv_pr, rec); 1613 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), 1614 query->conv_pr, mad->data); 1615 } else { 1616 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), 1617 rec, mad->data); 1618 } 1619 1620 *sa_query = &query->sa_query; 1621 1622 query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE; 1623 query->sa_query.mad_buf->context[1] = (query->conv_pr) ? 
1624 query->conv_pr : rec; 1625 1626 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); 1627 if (ret < 0) 1628 goto err3; 1629 1630 return ret; 1631 1632 err3: 1633 *sa_query = NULL; 1634 ib_sa_client_put(query->sa_query.client); 1635 free_mad(&query->sa_query); 1636 err2: 1637 kfree(query->conv_pr); 1638 err1: 1639 kfree(query); 1640 return ret; 1641 } 1642 EXPORT_SYMBOL(ib_sa_path_rec_get); 1643 1644 static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query, 1645 int status, 1646 struct ib_sa_mad *mad) 1647 { 1648 struct ib_sa_service_query *query = 1649 container_of(sa_query, struct ib_sa_service_query, sa_query); 1650 1651 if (mad) { 1652 struct ib_sa_service_rec rec; 1653 1654 ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table), 1655 mad->data, &rec); 1656 query->callback(status, &rec, query->context); 1657 } else 1658 query->callback(status, NULL, query->context); 1659 } 1660 1661 static void ib_sa_service_rec_release(struct ib_sa_query *sa_query) 1662 { 1663 kfree(container_of(sa_query, struct ib_sa_service_query, sa_query)); 1664 } 1665 1666 /** 1667 * ib_sa_service_rec_query - Start Service Record operation 1668 * @client:SA client 1669 * @device:device to send request on 1670 * @port_num: port number to send request on 1671 * @method:SA method - should be get, set, or delete 1672 * @rec:Service Record to send in request 1673 * @comp_mask:component mask to send in request 1674 * @timeout_ms:time to wait for response 1675 * @gfp_mask:GFP mask to use for internal allocations 1676 * @callback:function called when request completes, times out or is 1677 * canceled 1678 * @context:opaque user context passed to callback 1679 * @sa_query:request context, used to cancel request 1680 * 1681 * Send a Service Record set/get/delete to the SA to register, 1682 * unregister or query a service record. 1683 * The callback function will be called when the request completes (or 1684 * fails); status is 0 for a successful response, -EINTR if the query 1685 * is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error 1686 * occurred sending the query. The resp parameter of the callback is 1687 * only valid if status is 0. 1688 * 1689 * If the return value of ib_sa_service_rec_query() is negative, it is an 1690 * error code. Otherwise it is a request ID that can be used to cancel 1691 * the query. 
1692 */ 1693 int ib_sa_service_rec_query(struct ib_sa_client *client, 1694 struct ib_device *device, u8 port_num, u8 method, 1695 struct ib_sa_service_rec *rec, 1696 ib_sa_comp_mask comp_mask, 1697 unsigned long timeout_ms, gfp_t gfp_mask, 1698 void (*callback)(int status, 1699 struct ib_sa_service_rec *resp, 1700 void *context), 1701 void *context, 1702 struct ib_sa_query **sa_query) 1703 { 1704 struct ib_sa_service_query *query; 1705 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); 1706 struct ib_sa_port *port; 1707 struct ib_mad_agent *agent; 1708 struct ib_sa_mad *mad; 1709 int ret; 1710 1711 if (!sa_dev) 1712 return -ENODEV; 1713 1714 port = &sa_dev->port[port_num - sa_dev->start_port]; 1715 agent = port->agent; 1716 1717 if (method != IB_MGMT_METHOD_GET && 1718 method != IB_MGMT_METHOD_SET && 1719 method != IB_SA_METHOD_DELETE) 1720 return -EINVAL; 1721 1722 query = kzalloc(sizeof(*query), gfp_mask); 1723 if (!query) 1724 return -ENOMEM; 1725 1726 query->sa_query.port = port; 1727 ret = alloc_mad(&query->sa_query, gfp_mask); 1728 if (ret) 1729 goto err1; 1730 1731 ib_sa_client_get(client); 1732 query->sa_query.client = client; 1733 query->callback = callback; 1734 query->context = context; 1735 1736 mad = query->sa_query.mad_buf->mad; 1737 init_mad(&query->sa_query, agent); 1738 1739 query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL; 1740 query->sa_query.release = ib_sa_service_rec_release; 1741 mad->mad_hdr.method = method; 1742 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC); 1743 mad->sa_hdr.comp_mask = comp_mask; 1744 1745 ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table), 1746 rec, mad->data); 1747 1748 *sa_query = &query->sa_query; 1749 1750 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); 1751 if (ret < 0) 1752 goto err2; 1753 1754 return ret; 1755 1756 err2: 1757 *sa_query = NULL; 1758 ib_sa_client_put(query->sa_query.client); 1759 free_mad(&query->sa_query); 1760 1761 err1: 1762 kfree(query); 1763 return ret; 1764 } 1765 EXPORT_SYMBOL(ib_sa_service_rec_query); 1766 1767 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query, 1768 int status, 1769 struct ib_sa_mad *mad) 1770 { 1771 struct ib_sa_mcmember_query *query = 1772 container_of(sa_query, struct ib_sa_mcmember_query, sa_query); 1773 1774 if (mad) { 1775 struct ib_sa_mcmember_rec rec; 1776 1777 ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table), 1778 mad->data, &rec); 1779 query->callback(status, &rec, query->context); 1780 } else 1781 query->callback(status, NULL, query->context); 1782 } 1783 1784 static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query) 1785 { 1786 kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query)); 1787 } 1788 1789 int ib_sa_mcmember_rec_query(struct ib_sa_client *client, 1790 struct ib_device *device, u8 port_num, 1791 u8 method, 1792 struct ib_sa_mcmember_rec *rec, 1793 ib_sa_comp_mask comp_mask, 1794 unsigned long timeout_ms, gfp_t gfp_mask, 1795 void (*callback)(int status, 1796 struct ib_sa_mcmember_rec *resp, 1797 void *context), 1798 void *context, 1799 struct ib_sa_query **sa_query) 1800 { 1801 struct ib_sa_mcmember_query *query; 1802 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); 1803 struct ib_sa_port *port; 1804 struct ib_mad_agent *agent; 1805 struct ib_sa_mad *mad; 1806 int ret; 1807 1808 if (!sa_dev) 1809 return -ENODEV; 1810 1811 port = &sa_dev->port[port_num - sa_dev->start_port]; 1812 agent = port->agent; 1813 1814 query = 
kzalloc(sizeof(*query), gfp_mask); 1815 if (!query) 1816 return -ENOMEM; 1817 1818 query->sa_query.port = port; 1819 ret = alloc_mad(&query->sa_query, gfp_mask); 1820 if (ret) 1821 goto err1; 1822 1823 ib_sa_client_get(client); 1824 query->sa_query.client = client; 1825 query->callback = callback; 1826 query->context = context; 1827 1828 mad = query->sa_query.mad_buf->mad; 1829 init_mad(&query->sa_query, agent); 1830 1831 query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL; 1832 query->sa_query.release = ib_sa_mcmember_rec_release; 1833 mad->mad_hdr.method = method; 1834 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC); 1835 mad->sa_hdr.comp_mask = comp_mask; 1836 1837 ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table), 1838 rec, mad->data); 1839 1840 *sa_query = &query->sa_query; 1841 1842 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); 1843 if (ret < 0) 1844 goto err2; 1845 1846 return ret; 1847 1848 err2: 1849 *sa_query = NULL; 1850 ib_sa_client_put(query->sa_query.client); 1851 free_mad(&query->sa_query); 1852 1853 err1: 1854 kfree(query); 1855 return ret; 1856 } 1857 1858 /* Support GuidInfoRecord */ 1859 static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query, 1860 int status, 1861 struct ib_sa_mad *mad) 1862 { 1863 struct ib_sa_guidinfo_query *query = 1864 container_of(sa_query, struct ib_sa_guidinfo_query, sa_query); 1865 1866 if (mad) { 1867 struct ib_sa_guidinfo_rec rec; 1868 1869 ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), 1870 mad->data, &rec); 1871 query->callback(status, &rec, query->context); 1872 } else 1873 query->callback(status, NULL, query->context); 1874 } 1875 1876 static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query) 1877 { 1878 kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query)); 1879 } 1880 1881 int ib_sa_guid_info_rec_query(struct ib_sa_client *client, 1882 struct ib_device *device, u8 port_num, 1883 struct ib_sa_guidinfo_rec *rec, 1884 ib_sa_comp_mask comp_mask, u8 method, 1885 unsigned long timeout_ms, gfp_t gfp_mask, 1886 void (*callback)(int status, 1887 struct ib_sa_guidinfo_rec *resp, 1888 void *context), 1889 void *context, 1890 struct ib_sa_query **sa_query) 1891 { 1892 struct ib_sa_guidinfo_query *query; 1893 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); 1894 struct ib_sa_port *port; 1895 struct ib_mad_agent *agent; 1896 struct ib_sa_mad *mad; 1897 int ret; 1898 1899 if (!sa_dev) 1900 return -ENODEV; 1901 1902 if (method != IB_MGMT_METHOD_GET && 1903 method != IB_MGMT_METHOD_SET && 1904 method != IB_SA_METHOD_DELETE) { 1905 return -EINVAL; 1906 } 1907 1908 port = &sa_dev->port[port_num - sa_dev->start_port]; 1909 agent = port->agent; 1910 1911 query = kzalloc(sizeof(*query), gfp_mask); 1912 if (!query) 1913 return -ENOMEM; 1914 1915 query->sa_query.port = port; 1916 ret = alloc_mad(&query->sa_query, gfp_mask); 1917 if (ret) 1918 goto err1; 1919 1920 ib_sa_client_get(client); 1921 query->sa_query.client = client; 1922 query->callback = callback; 1923 query->context = context; 1924 1925 mad = query->sa_query.mad_buf->mad; 1926 init_mad(&query->sa_query, agent); 1927 1928 query->sa_query.callback = callback ? 
	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
				    struct ib_device *device,
				    u8 port_num)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	bool ret = false;
	unsigned long flags;

	if (!sa_dev)
		return ret;

	port = &sa_dev->port[port_num - sa_dev->start_port];

	spin_lock_irqsave(&port->classport_lock, flags);
	if ((port->classport_info.valid) &&
	    (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
		ret = ib_get_cpi_capmask2(&port->classport_info.data.ib)
			& IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);

struct ib_classport_info_context {
	struct completion	done;
	struct ib_sa_query	*sa_query;
};

static void ib_classportinfo_cb(void *context)
{
	struct ib_classport_info_context *cb_ctx = context;

	complete(&cb_ctx->done);
}

static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					       int status,
					       struct ib_sa_mad *mad)
{
	unsigned long flags;
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;

	if (mad) {
		if (sa_query->flags & IB_SA_QUERY_OPA) {
			struct opa_class_port_info rec;

			ib_unpack(opa_classport_info_rec_table,
				  ARRAY_SIZE(opa_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.opa, &rec,
				       sizeof(info->data.opa));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);

		} else {
			struct ib_class_port_info rec;

			ib_unpack(ib_classport_info_rec_table,
				  ARRAY_SIZE(ib_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.ib, &rec,
				       sizeof(info->data.ib));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_IB;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);
		}
	}
	query->callback(query->context);
}

static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}

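/*
 * The result of the ClassPortInfo query is cached per port in
 * port->classport_info (IB or OPA flavour) under classport_lock, so helpers
 * such as ib_sa_sendonly_fullmem_support() can answer without issuing a MAD.
 * update_ib_cpi() below populates the cache and retries a failed query up to
 * IB_SA_CPI_MAX_RETRY_CNT times, IB_SA_CPI_RETRY_WAIT msecs apart.
 */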
static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
					   unsigned long timeout_ms,
					   void (*callback)(void *context),
					   void *context,
					   struct ib_sa_query **sa_query)
{
	struct ib_mad_agent *agent;
	struct ib_sa_classport_info_query *query;
	struct ib_sa_mad *mad;
	gfp_t gfp_mask = GFP_KERNEL;
	int ret;

	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
						 port->port_num) ?
				 IB_SA_QUERY_OPA : 0;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err_free;

	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = ib_sa_classport_info_rec_callback;
	query->sa_query.release = ib_sa_classport_info_rec_release;
	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err_free_mad;

	return ret;

err_free_mad:
	*sa_query = NULL;
	free_mad(&query->sa_query);

err_free:
	kfree(query);
	return ret;
}

static void update_ib_cpi(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, ib_cpi_work.work);
	struct ib_classport_info_context *cb_context;
	unsigned long flags;
	int ret;

	/* If the classport info is valid, nothing
	 * to do here.
	 */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid) {
		spin_unlock_irqrestore(&port->classport_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
	if (!cb_context)
		goto err_nomem;

	init_completion(&cb_context->done);

	ret = ib_sa_classport_info_rec_query(port, 3000,
					     ib_classportinfo_cb, cb_context,
					     &cb_context->sa_query);
	if (ret < 0)
		goto free_cb_err;
	wait_for_completion(&cb_context->done);
free_cb_err:
	kfree(cb_context);
	spin_lock_irqsave(&port->classport_lock, flags);

	/* If the classport info is still not valid, the query should have
	 * failed for some reason. Retry issuing the query
	 */
	if (!port->classport_info.valid) {
		port->classport_info.retry_cnt++;
		if (port->classport_info.retry_cnt <=
		    IB_SA_CPI_MAX_RETRY_CNT) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
		}
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

err_nomem:
	return;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	xa_lock_irqsave(&queries, flags);
	__xa_erase(&queries, query->id);
	xa_unlock_irqrestore(&queries, flags);

	free_mad(query);
	if (query->client)
		ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

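/*
 * Per-port work item: re-read PortInfo (SM LID, SL, LMC) and replace the
 * cached SM address handle used to address SA MADs.  Runs once from
 * ib_sa_add_one() and again whenever ib_sa_event() sees a port, LID, pkey,
 * SM change or client reregister event.
 */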
static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct rdma_ah_attr ah_attr;
	bool grh_required;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.type = rdma_ah_find_type(port->agent->device,
					 port->port_num);
	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
	rdma_ah_set_port_num(&ah_attr, port->port_num);

	grh_required = rdma_is_grh_required(port->agent->device,
					    port->port_num);

	/*
	 * The OPA sm_lid of 0xFFFF needs special handling so that it can be
	 * differentiated from a permissive LID of 0xFFFF. We set the
	 * grh_required flag here so the SA can program the DGID in the
	 * address handle appropriately
	 */
	if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
	    (grh_required ||
	     port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
		rdma_ah_set_make_grd(&ah_attr, true);

	if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
		rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
		rdma_ah_set_subnet_prefix(&ah_attr,
					  cpu_to_be64(port_attr.subnet_prefix));
		rdma_ah_set_interface_id(&ah_attr,
					 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
	}

	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr,
				    RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		u8 port_num = event->element.port_num - sa_dev->start_port;
		struct ib_sa_port *port = &sa_dev->port[port_num];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE ||
		    event->event == IB_EVENT_PORT_ACTIVE) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			port->classport_info.retry_cnt = 0;
			spin_unlock_irqrestore(&port->classport_lock, flags);
			queue_delayed_work(ib_wq,
					   &port->ib_cpi_work, delay);
		}
		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
	}
}

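/*
 * Device add: one ib_sa_port is set up for every SA-capable port, each with
 * its own GSI MAD agent, SM AH cache and ClassPortInfo cache.  The event
 * handler is only registered once the ports are ready (see the comment in
 * the function body), and the SM AHs are primed last.  If no port supports
 * SA, -EOPNOTSUPP keeps the client from being attached to the device.
 */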
static int ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;
	int ret;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
	if (!sa_dev)
		return -ENOMEM;

	sa_dev->start_port = s;
	sa_dev->end_port = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent)) {
			ret = PTR_ERR(sa_dev->port[i].agent);
			goto err;
		}

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
				  update_ib_cpi);

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	ib_register_event_handler(&sa_dev->event_handler);

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return 0;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return ret;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	ib_unregister_event_handler(&sa_dev->event_handler);
	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}

	}

	kfree(sa_dev);
}

int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	WARN_ON(!xa_empty(&queries));
}
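
/*
 * Illustrative sketch only (not part of the original file): one way a kernel
 * consumer of the exported query API above might issue a synchronous
 * GuidInfoRecord GET.  The demo_* names are hypothetical; the ib_sa_* calls,
 * IB_MGMT_METHOD_GET and the argument order follow the definitions in this
 * file and <rdma/ib_sa.h>.
 *
 *	static struct ib_sa_client demo_sa_client;
 *
 *	static void demo_guidinfo_cb(int status,
 *				     struct ib_sa_guidinfo_rec *resp,
 *				     void *context)
 *	{
 *		// status is 0 on success, -EINVAL when the SA reports an
 *		// error status, or -ETIMEDOUT/-EINTR/-EIO from the send path
 *		// (see send_handler()/recv_handler() above).
 *		complete(context);
 *	}
 *
 *	static int demo_query_guidinfo(struct ib_device *device, u8 port_num)
 *	{
 *		struct ib_sa_guidinfo_rec rec = {};
 *		struct ib_sa_query *query;
 *		DECLARE_COMPLETION_ONSTACK(done);
 *		int id;
 *
 *		ib_sa_register_client(&demo_sa_client);
 *
 *		// Real callers fill record fields and the matching comp_mask
 *		// bits; both are left empty here only for brevity.
 *		id = ib_sa_guid_info_rec_query(&demo_sa_client, device,
 *					       port_num, &rec, 0,
 *					       IB_MGMT_METHOD_GET, 3000,
 *					       GFP_KERNEL, demo_guidinfo_cb,
 *					       &done, &query);
 *		if (id >= 0)
 *			wait_for_completion(&done);
 *
 *		// The returned id and query could instead be handed to
 *		// ib_sa_cancel_query(id, query) to abort an in-flight request.
 *		ib_sa_unregister_client(&demo_sa_client);
 *		return id < 0 ? id : 0;
 *	}
 */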