/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include "sa.h"
#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah        *ah;
	struct kref          ref;
	u16		     pkey_index;
	u8		     src_path_mask;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	spinlock_t           ah_lock;
	u8                   port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list; /* Local svc request list */
	u32			seq; /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
		.len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
		.len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
		.len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits  = 0,
	  .size_bits    = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits  = 0,
	  .size_bits    = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits  = 0,
	  .size_bits    = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits  = 0,
	  .size_bits    = 2*64 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 512 },
};

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

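/*
 * Fill in the netlink family header (device name, port number, path
 * use) and the path record attributes for an RDMA_NL_LS_OP_RESOLVE
 * request, based on the SA MAD already built for this query.
 */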
static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = (struct rdma_ls_resolve_header *)
		skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

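/*
 * Build a netlink resolve request for this query and multicast it to
 * the RDMA_NL_GROUP_LS group.  Returns the attribute payload length if
 * the message went out, 0 if the multicast failed, or a negative errno
 * on allocation failure; the caller treats anything <= 0 as an error.
 */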
static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}

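/*
 * Queue the query on the netlink request list, arm the timeout work if
 * the list was previously empty, and send the resolve request to user
 * space.  On send failure the query is unlinked again and -EIO is
 * returned.
 */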
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	/* Put the request on the list first. */
	spin_lock_irqsave(&ib_nl_request_lock, flags);
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	ret = ib_nl_send_msg(query, gfp_mask);
	if (ret <= 0) {
		ret = -EIO;
		/* Remove the request */
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		list_del(&query->list);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	} else {
		ret = 0;
	}

	return ret;
}

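/*
 * Mark a queued netlink request as cancelled and kick the timeout work
 * so it completes the query.  Returns 1 if the query was found on the
 * request list, 0 otherwise.
 */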
static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data  *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

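/*
 * Timeout work for the netlink request list: for every request whose
 * deadline has passed, fall back to sending the MAD on the wire
 * (unless the request was cancelled, in which case it is flushed),
 * then re-arm the work for the next pending entry.
 */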
static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

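/*
 * Netlink handler for RDMA_NL_LS_OP_SET_TIMEOUT: clamp the requested
 * timeout to [IB_SA_LOCAL_SVC_TIMEOUT_MIN, IB_SA_LOCAL_SVC_TIMEOUT_MAX],
 * apply the delta to all queued requests and reschedule the timeout
 * work accordingly.
 */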
static int ib_nl_handle_set_timeout(struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	if (ret)
		return 0;

	return 1;
}

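/*
 * Netlink handler for resolve responses: match the response to a
 * pending request by sequence number, then either complete the query
 * from the returned path records or fall back to sending the MAD on
 * the wire.
 */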
static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}

static struct ibnl_client_cbs ib_sa_cb_table[] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.dump = ib_nl_handle_resolve_resp,
		.module = THIS_MODULE },
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.dump = ib_nl_handle_set_timeout,
		.module = THIS_MODULE },
};

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

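/*
 * Work handler that rebuilds the cached SM address handle for a port
 * after a port event, looking up the default P_Key index and the
 * current SM LID/SL from the port attributes.
 */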
static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr   ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		printk(KERN_WARNING "Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah) {
		printk(KERN_WARNING "Couldn't allocate new SM AH\n");
		return;
	}

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		printk(KERN_ERR "Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = port_attr.sm_lid;
	ah_attr.sl       = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		printk(KERN_WARNING "Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		struct ib_sa_port *port =
			&sa_dev->port[event->element.port_num - sa_dev->start_port];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
					    sa_dev->start_port].update_task);
	}
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port   *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int use_roce;
	struct net_device *ndev = NULL;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	use_roce = rdma_cap_eth_ah(device, port_num);

	if (use_roce) {
		struct net_device *idev;
		struct net_device *resolved_dev;
		struct rdma_dev_addr dev_addr = {.bound_dev_if = rec->ifindex,
						 .net = rec->net ? rec->net :
							 &init_net};
		union {
			struct sockaddr     _sockaddr;
			struct sockaddr_in  _sockaddr_in;
			struct sockaddr_in6 _sockaddr_in6;
		} sgid_addr, dgid_addr;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
		rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);

		/* validate the route */
		ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
					    &dgid_addr._sockaddr, &dev_addr);
		if (ret)
			return ret;

		if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
		     dev_addr.network == RDMA_NETWORK_IPV6) &&
		    rec->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
			return -EINVAL;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		resolved_dev = dev_get_by_index(dev_addr.net,
						dev_addr.bound_dev_if);
		/* dev_get_by_index() returns NULL if the device went away */
		if (!resolved_dev) {
			dev_put(idev);
			return -ENODEV;
		}
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		ndev = ib_get_ndev_from_path(rec);
		rcu_read_lock();
		if ((ndev && ndev != resolved_dev) ||
		    (resolved_dev != idev &&
		     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}
	}

	if (rec->hop_limit > 1 || use_roce) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid_by_port(device, &rec->sgid,
						 rec->gid_type, port_num, ndev,
						 &gid_index);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}

		ah_attr->grh.sgid_index    = gid_index;
		ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit     = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
		if (ndev)
			dev_put(ndev);
	}

	if (use_roce)
		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);

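/*
 * Take a reference on the port's current SM address handle and
 * allocate the send MAD for this query; fails with -EAGAIN if no SM
 * address handle is available yet.
 */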
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    IB_MGMT_BASE_VERSION);
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid           =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

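/*
 * Assign the query an id from the IDR and hand it to the netlink path
 * (if local service is enabled and user space is listening) or to the
 * MAD layer.  Returns the query id on success or a negative errno.
 */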
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms  = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		rec.net = NULL;
		rec.ifindex = 0;
		rec.gid_type = IB_GID_TYPE_IB;
		memset(rec.dmac, 0, ETH_ALEN);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

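/*
 * Send an MCMemberRecord request to the SA; semantics mirror
 * ib_sa_path_rec_get(), with @method selecting the SA operation.
 */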
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;
	struct ib_mad_send_buf *mad_buf;

	mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
	query = mad_buf->context[0];

	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

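/*
 * Client hook: allocate per-port state for every SA-capable port,
 * register a GSI MAD agent and the event handler, then prime the
 * cached SM address handles.
 */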
static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

static int __init ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		printk(KERN_ERR "Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		printk(KERN_ERR "Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	if (ibnl_add_client(RDMA_NL_LS, RDMA_NL_LS_NUM_OPS,
			    ib_sa_cb_table)) {
		pr_err("Failed to add netlink callback\n");
		ret = -EINVAL;
		goto err4;
	}
	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;
err4:
	destroy_workqueue(ib_nl_wq);
err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

static void __exit ib_sa_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_LS);
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);