/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include "sa.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah *ah;
	struct kref ref;
	u16 pkey_index;
	u8 src_path_mask;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah *sm_ah;
	struct work_struct update_task;
	spinlock_t ah_lock;
	u8 port_num;
};

struct ib_sa_device {
	int start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client *client;
	struct ib_sa_port *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah *sm_ah;
	int id;
	u32 flags;
	struct list_head list;		/* Local svc request list */
	u32 seq;			/* Local svc request sequence number */
	unsigned long timeout;		/* Local svc timeout */
	u8 path_use;			/* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
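
/*
 * Each SA attribute type below wraps the generic struct ib_sa_query in a
 * small container that also carries the caller's typed callback and opaque
 * context.  The generic callback/release hooks in struct ib_sa_query point
 * at per-type handlers (e.g. ib_sa_path_rec_callback()), which unpack the
 * MAD payload into the matching record structure before calling back into
 * the client.
 */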

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name	= "sa",
	.add	= ib_sa_add_one,
	.remove	= ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
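
/*
 * The tables below describe the on-the-wire layout of each SA attribute for
 * ib_pack()/ib_unpack(): every entry names a struct field, its size, and the
 * 32-bit word/bit offset it occupies in the big-endian MAD payload.  For
 * example, the PathRecord DLID occupies bits 0-15 of word 10, so
 * ib_unpack(path_rec_table, ...) fills ib_sa_path_rec.dlid from exactly that
 * span.  RESERVED entries only consume wire space and have no struct field.
 */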

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0, .offset_bits = 0, .size_bits = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2, .offset_bits = 0, .size_bits = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6, .offset_bits = 0, .size_bits = 128 },
	{ PATH_REC_FIELD(dlid),
	  .offset_words = 10, .offset_bits = 0, .size_bits = 16 },
	{ PATH_REC_FIELD(slid),
	  .offset_words = 10, .offset_bits = 16, .size_bits = 16 },
	{ PATH_REC_FIELD(raw_traffic),
	  .offset_words = 11, .offset_bits = 0, .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 11, .offset_bits = 1, .size_bits = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11, .offset_bits = 4, .size_bits = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11, .offset_bits = 24, .size_bits = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12, .offset_bits = 0, .size_bits = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12, .offset_bits = 8, .size_bits = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12, .offset_bits = 9, .size_bits = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12, .offset_bits = 16, .size_bits = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13, .offset_bits = 0, .size_bits = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13, .offset_bits = 12, .size_bits = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13, .offset_bits = 16, .size_bits = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13, .offset_bits = 18, .size_bits = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13, .offset_bits = 24, .size_bits = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13, .offset_bits = 26, .size_bits = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14, .offset_bits = 0, .size_bits = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14, .offset_bits = 2, .size_bits = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14, .offset_bits = 8, .size_bits = 8 },
	{ RESERVED,
	  .offset_words = 14, .offset_bits = 16, .size_bits = 48 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0, .offset_bits = 0, .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4, .offset_bits = 0, .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8, .offset_bits = 0, .size_bits = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9, .offset_bits = 0, .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9, .offset_bits = 16, .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9, .offset_bits = 18, .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9, .offset_bits = 24, .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10, .offset_bits = 0, .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10, .offset_bits = 16, .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10, .offset_bits = 18, .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10, .offset_bits = 24, .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10, .offset_bits = 26, .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11, .offset_bits = 0, .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11, .offset_bits = 4, .size_bits = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11, .offset_bits = 24, .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12, .offset_bits = 0, .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12, .offset_bits = 4, .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12, .offset_bits = 8, .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 12, .offset_bits = 9, .size_bits = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0, .offset_bits = 0, .size_bits = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2, .offset_bits = 0, .size_bits = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6, .offset_bits = 0, .size_bits = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7, .offset_bits = 0, .size_bits = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8, .offset_bits = 0, .size_bits = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12, .offset_bits = 0, .size_bits = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28, .offset_bits = 0, .size_bits = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32, .offset_bits = 0, .size_bits = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36, .offset_bits = 0, .size_bits = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40, .offset_bits = 0, .size_bits = 2*64 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0, .offset_bits = 0, .size_bits = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0, .offset_bits = 16, .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0, .offset_bits = 24, .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1, .offset_bits = 0, .size_bits = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2, .offset_bits = 0, .size_bits = 512 },
};

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}
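
/*
 * Path record resolution can be delegated to user space over the RDMA_NL_LS
 * netlink group (typically serviced by a daemon such as ibacm).  The helpers
 * below build an LS_RESOLVE request from the pending SA MAD, queue the query
 * on ib_nl_request_list with a timeout, and either complete it from the
 * netlink response or fall back to sending the original MAD to the SA when
 * the request fails, times out, or is cancelled.
 */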

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = (struct rdma_ls_resolve_header *)
		skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

static int ib_nl_send_msg(struct ib_sa_query *query)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}

static int ib_nl_make_request(struct ib_sa_query *query)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	ret = ib_nl_send_msg(query);
	if (ret <= 0) {
		ret = -EIO;
		goto request_out;
	} else {
		ret = 0;
	}

	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);

request_out:
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
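
/*
 * User space can tune the local service timeout at run time with an
 * RDMA_NL_LS_OP_SET_TIMEOUT message.  The new value is clamped to
 * [IB_SA_LOCAL_SVC_TIMEOUT_MIN, IB_SA_LOCAL_SVC_TIMEOUT_MAX] and the
 * deadlines of requests already on ib_nl_request_list are shifted by the
 * same delta so the delayed work fires at the right time.
 */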

static int ib_nl_handle_set_timeout(struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	if (ret)
		return 0;

	return 1;
}

static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}

static struct ibnl_client_cbs ib_sa_cb_table[] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.dump = ib_nl_handle_resolve_resp,
		.module = THIS_MODULE },
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.dump = ib_nl_handle_set_timeout,
		.module = THIS_MODULE },
};

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		printk(KERN_WARNING "Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah) {
		printk(KERN_WARNING "Couldn't allocate new SM AH\n");
		return;
	}

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		printk(KERN_ERR "Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid = port_attr.sm_lid;
	ah_attr.sl = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		printk(KERN_WARNING "Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}
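
/*
 * Each port caches a single address handle that targets the subnet manager
 * (port->sm_ah), reference counted so in-flight queries can keep using it.
 * Port events that may invalidate it (LID, SM, or pkey changes and the like)
 * drop the cached AH below and queue update_sm_ah() to rebuild it from the
 * current port attributes.
 */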

static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		struct ib_sa_port *port =
			&sa_dev->port[event->element.port_num - sa_dev->start_port];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
						sa_dev->start_port].update_task);
	}
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
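
/*
 * The two helpers below turn a resolved path record into address-handle
 * attributes: DLID, SL, static rate and the source path bits (masked by the
 * port's LMC via the cached SM AH), plus a GRH when the path leaves the
 * subnet (hop_limit > 1) or when the port requires Ethernet addressing
 * (rdma_cap_eth_ah(), i.e. RoCE), in which case the DMAC and VLAN from the
 * record are used as well.
 */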

static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int force_grh;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	force_grh = rdma_cap_eth_ah(device, port_num);

	if (rec->hop_limit > 1 || force_grh) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid(device, &rec->sgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = gid_index;
		ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
	}
	if (force_grh) {
		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);
		ah_attr->vlan_id = rec->vlan_id;
	} else {
		ah_attr->vlan_id = 0xffff;
	}

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    IB_MGMT_BASE_VERSION);
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}
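
/*
 * send_mad() hands a fully built query to the wire: it allocates an idr id
 * (the value later passed to ib_sa_cancel_query()), tries the netlink local
 * service path first when it is enabled and user space is listening, and
 * otherwise posts the MAD through the port's agent.  Once the MAD is posted
 * the query may complete and be freed from another context, so the query
 * pointer must not be touched afterwards.
 */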

static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = !!(gfp_mask & __GFP_WAIT);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		rec.vlan_id = 0xffff;
		memset(rec.dmac, 0, ETH_ALEN);
		memset(rec.smac, 0, ETH_ALEN);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method   = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
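
/*
 * Illustrative (not compiled) usage sketch for the API above; the client,
 * callback and variable names here are hypothetical, not part of this file:
 *
 *	static void my_path_done(int status, struct ib_sa_path_rec *resp,
 *				 void *context)
 *	{
 *		// resp is only valid when status == 0
 *	}
 *
 *	struct ib_sa_path_rec rec = { .sgid = sgid, .dgid = dgid };
 *	struct ib_sa_query *sa_query;
 *	int id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				    IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				    1000, GFP_KERNEL, my_path_done, NULL,
 *				    &sa_query);
 *	if (id >= 0)
 *		ib_sa_cancel_query(id, sa_query);	// optional cancel
 */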

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	mad->mad_hdr.method   = method;
	mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
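
/*
 * The MCMemberRecord helpers below are not exported; within this module they
 * are driven by the multicast join/leave handling set up via mcast_init()
 * rather than being called by clients directly.
 */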

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method   = method;
	mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method   = method;
	mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;
	struct ib_mad_send_buf *mad_buf;

	mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
	query = mad_buf->context[0];

	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}
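
/*
 * Device lifecycle: when a new IB device appears, ib_sa_add_one() allocates
 * one ib_sa_port per port, registers a GSI MAD agent on every port that
 * supports SA queries, hooks up the asynchronous event handler and primes
 * the SM address handles.  ib_sa_remove_one() tears this down in the
 * reverse order after flushing any outstanding update work.
 */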

static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

static int __init ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		printk(KERN_ERR "Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		printk(KERN_ERR "Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	if (ibnl_add_client(RDMA_NL_LS, RDMA_NL_LS_NUM_OPS,
			    ib_sa_cb_table)) {
		pr_err("Failed to add netlink callback\n");
		ret = -EINVAL;
		goto err4;
	}
	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err4:
	destroy_workqueue(ib_nl_wq);
err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

static void __exit ib_sa_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_LS);
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);