/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include "sa.h"
#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah        *ah;
	struct kref          ref;
	u16		     pkey_index;
	u8		     src_path_mask;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	spinlock_t           ah_lock;
	u8                   port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list; /* Local svc request list */
	u32			seq; /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits  = 0,
	  .size_bits    = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits  = 0,
	  .size_bits    = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits  = 0,
	  .size_bits    = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits  = 0,
	  .size_bits    = 2*64 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 512 },
};

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = (struct rdma_ls_resolve_header *)
		skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}

static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	/* Put the request on the list first. */
	spin_lock_irqsave(&ib_nl_request_lock, flags);
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	ret = ib_nl_send_msg(query, gfp_mask);
	if (ret <= 0) {
		ret = -EIO;
		/* Remove the request */
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		list_del(&query->list);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	} else {
		ret = 0;
	}

	return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

static int ib_nl_handle_set_timeout(struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	if (ret)
		return 0;

	return 1;
}

static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}

static struct ibnl_client_cbs ib_sa_cb_table[] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.dump = ib_nl_handle_resolve_resp,
		.module = THIS_MODULE },
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.dump = ib_nl_handle_set_timeout,
		.module = THIS_MODULE },
};

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr   ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		printk(KERN_WARNING "Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah) {
		printk(KERN_WARNING "Couldn't allocate new SM AH\n");
		return;
	}

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		printk(KERN_ERR "Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = port_attr.sm_lid;
	ah_attr.sl       = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		printk(KERN_WARNING "Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		struct ib_sa_port *port =
			&sa_dev->port[event->element.port_num - sa_dev->start_port];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
						sa_dev->start_port].update_task);
	}
}
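
/*
 * Client registration below is reference counted: ib_sa_register_client()
 * starts the count at one, each query issued through this file takes an
 * extra reference (ib_sa_client_get() in the *_rec_query() helpers) that
 * is dropped from send_handler() when the query finishes, and
 * ib_sa_unregister_client() drops the initial reference and then waits on
 * client->comp until every outstanding query has completed.
 */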
void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port   *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int use_roce;
	struct net_device *ndev = NULL;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl   = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num  = port_num;
	ah_attr->static_rate = rec->rate;

	use_roce = rdma_cap_eth_ah(device, port_num);

	if (use_roce) {
		struct net_device *idev;
		struct net_device *resolved_dev;
		struct rdma_dev_addr dev_addr = {.bound_dev_if = rec->ifindex,
						 .net = rec->net ? rec->net :
							&init_net};
		union {
			struct sockaddr     _sockaddr;
			struct sockaddr_in  _sockaddr_in;
			struct sockaddr_in6 _sockaddr_in6;
		} sgid_addr, dgid_addr;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
		rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);

		/* validate the route */
		ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
					    &dgid_addr._sockaddr, &dev_addr);
		if (ret)
			return ret;

		if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
		     dev_addr.network == RDMA_NETWORK_IPV6) &&
		    rec->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
			return -EINVAL;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		resolved_dev = dev_get_by_index(dev_addr.net,
						dev_addr.bound_dev_if);
		/* The bound netdevice may have gone away underneath us */
		if (!resolved_dev) {
			dev_put(idev);
			return -ENODEV;
		}
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		ndev = ib_get_ndev_from_path(rec);
		rcu_read_lock();
		if ((ndev && ndev != resolved_dev) ||
		    (resolved_dev != idev &&
		     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}
	}

	if (rec->hop_limit > 1 || use_roce) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid_by_port(device, &rec->sgid,
						 rec->gid_type, port_num, ndev,
						 &gid_index);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}

		ah_attr->grh.sgid_index    = gid_index;
		ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit     = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
		if (ndev)
			dev_put(ndev);
	}

	if (use_roce)
		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    IB_MGMT_BASE_VERSION);
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}
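
/*
 * send_mad() below ties the pieces together: it allocates an idr id for
 * the query (the same id later used by ib_sa_cancel_query()), and when
 * local service resolution is enabled and a netlink listener is present
 * it first hands the path query to user space via ib_nl_make_request();
 * otherwise, or if that hand-off fails, the MAD is posted directly with
 * ib_post_send_mad().
 */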
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms  = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		rec.net = NULL;
		rec.ifindex = 0;
		rec.gid_type = IB_GID_TYPE_IB;
		memset(rec.dmac, 0, ETH_ALEN);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

static int __init ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		printk(KERN_ERR "Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		printk(KERN_ERR "Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	if (ibnl_add_client(RDMA_NL_LS, RDMA_NL_LS_NUM_OPS,
			    ib_sa_cb_table)) {
		pr_err("Failed to add netlink callback\n");
		ret = -EINVAL;
		goto err4;
	}
	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;
err4:
	destroy_workqueue(ib_nl_wq);
err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

static void __exit ib_sa_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_LS);
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);
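
/*
 * Illustrative usage sketch (editorial addition, kept as a comment): how a
 * kernel consumer might issue the path record query documented above.  The
 * identifiers my_client, my_cb, my_ctx and my_query are hypothetical; the
 * rest is the API exported by this file and <rdma/ib_sa.h>.
 *
 *	static void my_cb(int status, struct ib_sa_path_rec *resp, void *ctx)
 *	{
 *		if (!status)
 *			pr_info("path resolved, dlid 0x%x\n",
 *				be16_to_cpu(resp->dlid));
 *	}
 *
 *	struct ib_sa_client my_client;
 *	struct ib_sa_query *my_query;
 *	struct ib_sa_path_rec rec = {
 *		.sgid      = sgid,
 *		.dgid      = dgid,
 *		.numb_path = 1,
 *		.pkey      = cpu_to_be16(0xffff),
 *	};
 *	int id;
 *
 *	ib_sa_register_client(&my_client);
 *	id = ib_sa_path_rec_get(&my_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
 *				IB_SA_PATH_REC_NUMB_PATH | IB_SA_PATH_REC_PKEY,
 *				1000, GFP_KERNEL, my_cb, my_ctx, &my_query);
 *	if (id < 0)
 *		pr_err("path query failed: %d\n", id);
 *
 * A negative return is an error; otherwise id can later be passed to
 * ib_sa_cancel_query(id, my_query), and ib_sa_unregister_client(&my_client)
 * waits for any outstanding queries before the client goes away.
 */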