xref: /linux/drivers/infiniband/core/sa_query.c (revision ed4bc1890b4984d0af447ad3cc1f93541623f8f3)
1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
4  * Copyright (c) 2006 Intel Corporation.  All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/err.h>
38 #include <linux/random.h>
39 #include <linux/spinlock.h>
40 #include <linux/slab.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/kref.h>
43 #include <linux/xarray.h>
44 #include <linux/workqueue.h>
45 #include <uapi/linux/if_ether.h>
46 #include <rdma/ib_pack.h>
47 #include <rdma/ib_cache.h>
48 #include <rdma/rdma_netlink.h>
49 #include <net/netlink.h>
50 #include <uapi/rdma/ib_user_sa.h>
51 #include <rdma/ib_marshall.h>
52 #include <rdma/ib_addr.h>
53 #include <rdma/opa_addr.h>
54 #include "sa.h"
55 #include "core_priv.h"
56 
57 #define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
58 #define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
59 #define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
60 #define IB_SA_CPI_MAX_RETRY_CNT			3
61 #define IB_SA_CPI_RETRY_WAIT			1000 /* msecs */
62 static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
63 
64 struct ib_sa_sm_ah {
65 	struct ib_ah        *ah;
66 	struct kref          ref;
67 	u16		     pkey_index;
68 	u8		     src_path_mask;
69 };
70 
71 enum rdma_class_port_info_type {
72 	RDMA_CLASS_PORT_INFO_IB,
73 	RDMA_CLASS_PORT_INFO_OPA
74 };
75 
76 struct rdma_class_port_info {
77 	enum rdma_class_port_info_type type;
78 	union {
79 		struct ib_class_port_info ib;
80 		struct opa_class_port_info opa;
81 	};
82 };
83 
84 struct ib_sa_classport_cache {
85 	bool valid;
86 	int retry_cnt;
87 	struct rdma_class_port_info data;
88 };
89 
90 struct ib_sa_port {
91 	struct ib_mad_agent *agent;
92 	struct ib_sa_sm_ah  *sm_ah;
93 	struct work_struct   update_task;
94 	struct ib_sa_classport_cache classport_info;
95 	struct delayed_work ib_cpi_work;
96 	spinlock_t                   classport_lock; /* protects class port info set */
97 	spinlock_t           ah_lock;
98 	u8                   port_num;
99 };
100 
101 struct ib_sa_device {
102 	int                     start_port, end_port;
103 	struct ib_event_handler event_handler;
104 	struct ib_sa_port port[];
105 };
106 
107 struct ib_sa_query {
108 	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
109 	void (*release)(struct ib_sa_query *);
110 	struct ib_sa_client    *client;
111 	struct ib_sa_port      *port;
112 	struct ib_mad_send_buf *mad_buf;
113 	struct ib_sa_sm_ah     *sm_ah;
114 	int			id;
115 	u32			flags;
116 	struct list_head	list; /* Local svc request list */
117 	u32			seq; /* Local svc request sequence number */
118 	unsigned long		timeout; /* Local svc timeout */
119 	u8			path_use; /* How will the pathrecord be used */
120 };
121 
122 #define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
123 #define IB_SA_CANCEL			0x00000002
124 #define IB_SA_QUERY_OPA			0x00000004
125 
126 struct ib_sa_service_query {
127 	void (*callback)(int, struct ib_sa_service_rec *, void *);
128 	void *context;
129 	struct ib_sa_query sa_query;
130 };
131 
132 struct ib_sa_path_query {
133 	void (*callback)(int, struct sa_path_rec *, void *);
134 	void *context;
135 	struct ib_sa_query sa_query;
136 	struct sa_path_rec *conv_pr;
137 };
138 
139 struct ib_sa_guidinfo_query {
140 	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
141 	void *context;
142 	struct ib_sa_query sa_query;
143 };
144 
145 struct ib_sa_classport_info_query {
146 	void (*callback)(void *);
147 	void *context;
148 	struct ib_sa_query sa_query;
149 };
150 
151 struct ib_sa_mcmember_query {
152 	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
153 	void *context;
154 	struct ib_sa_query sa_query;
155 };
156 
157 static LIST_HEAD(ib_nl_request_list);
158 static DEFINE_SPINLOCK(ib_nl_request_lock);
159 static atomic_t ib_nl_sa_request_seq;
160 static struct workqueue_struct *ib_nl_wq;
161 static struct delayed_work ib_nl_timed_work;
162 static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
163 	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
164 		.len = sizeof(struct ib_path_rec_data)},
165 	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
166 	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
167 	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
168 		.len = sizeof(struct rdma_nla_ls_gid)},
169 	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
170 		.len = sizeof(struct rdma_nla_ls_gid)},
171 	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
172 	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
173 	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
174 };
175 
176 
177 static int ib_sa_add_one(struct ib_device *device);
178 static void ib_sa_remove_one(struct ib_device *device, void *client_data);
179 
180 static struct ib_client sa_client = {
181 	.name   = "sa",
182 	.add    = ib_sa_add_one,
183 	.remove = ib_sa_remove_one
184 };
185 
186 static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
187 
188 static DEFINE_SPINLOCK(tid_lock);
189 static u32 tid;
190 
191 #define PATH_REC_FIELD(field) \
192 	.struct_offset_bytes = offsetof(struct sa_path_rec, field),	\
193 	.struct_size_bytes   = sizeof_field(struct sa_path_rec, field),	\
194 	.field_name          = "sa_path_rec:" #field
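
/*
 * For reference, a minimal expansion sketch: the first entry of
 * path_rec_table below,
 *
 *	{ PATH_REC_FIELD(service_id),
 *	  .offset_words = 0, .offset_bits = 0, .size_bits = 64 },
 *
 * expands to
 *
 *	{ .struct_offset_bytes = offsetof(struct sa_path_rec, service_id),
 *	  .struct_size_bytes   = sizeof_field(struct sa_path_rec, service_id),
 *	  .field_name          = "sa_path_rec:service_id",
 *	  .offset_words = 0, .offset_bits = 0, .size_bits = 64 },
 *
 * i.e. the 64 bits starting at dword 0, bit 0 of the wire format map
 * to the service_id member of struct sa_path_rec.
 */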
195 
196 static const struct ib_field path_rec_table[] = {
197 	{ PATH_REC_FIELD(service_id),
198 	  .offset_words = 0,
199 	  .offset_bits  = 0,
200 	  .size_bits    = 64 },
201 	{ PATH_REC_FIELD(dgid),
202 	  .offset_words = 2,
203 	  .offset_bits  = 0,
204 	  .size_bits    = 128 },
205 	{ PATH_REC_FIELD(sgid),
206 	  .offset_words = 6,
207 	  .offset_bits  = 0,
208 	  .size_bits    = 128 },
209 	{ PATH_REC_FIELD(ib.dlid),
210 	  .offset_words = 10,
211 	  .offset_bits  = 0,
212 	  .size_bits    = 16 },
213 	{ PATH_REC_FIELD(ib.slid),
214 	  .offset_words = 10,
215 	  .offset_bits  = 16,
216 	  .size_bits    = 16 },
217 	{ PATH_REC_FIELD(ib.raw_traffic),
218 	  .offset_words = 11,
219 	  .offset_bits  = 0,
220 	  .size_bits    = 1 },
221 	{ RESERVED,
222 	  .offset_words = 11,
223 	  .offset_bits  = 1,
224 	  .size_bits    = 3 },
225 	{ PATH_REC_FIELD(flow_label),
226 	  .offset_words = 11,
227 	  .offset_bits  = 4,
228 	  .size_bits    = 20 },
229 	{ PATH_REC_FIELD(hop_limit),
230 	  .offset_words = 11,
231 	  .offset_bits  = 24,
232 	  .size_bits    = 8 },
233 	{ PATH_REC_FIELD(traffic_class),
234 	  .offset_words = 12,
235 	  .offset_bits  = 0,
236 	  .size_bits    = 8 },
237 	{ PATH_REC_FIELD(reversible),
238 	  .offset_words = 12,
239 	  .offset_bits  = 8,
240 	  .size_bits    = 1 },
241 	{ PATH_REC_FIELD(numb_path),
242 	  .offset_words = 12,
243 	  .offset_bits  = 9,
244 	  .size_bits    = 7 },
245 	{ PATH_REC_FIELD(pkey),
246 	  .offset_words = 12,
247 	  .offset_bits  = 16,
248 	  .size_bits    = 16 },
249 	{ PATH_REC_FIELD(qos_class),
250 	  .offset_words = 13,
251 	  .offset_bits  = 0,
252 	  .size_bits    = 12 },
253 	{ PATH_REC_FIELD(sl),
254 	  .offset_words = 13,
255 	  .offset_bits  = 12,
256 	  .size_bits    = 4 },
257 	{ PATH_REC_FIELD(mtu_selector),
258 	  .offset_words = 13,
259 	  .offset_bits  = 16,
260 	  .size_bits    = 2 },
261 	{ PATH_REC_FIELD(mtu),
262 	  .offset_words = 13,
263 	  .offset_bits  = 18,
264 	  .size_bits    = 6 },
265 	{ PATH_REC_FIELD(rate_selector),
266 	  .offset_words = 13,
267 	  .offset_bits  = 24,
268 	  .size_bits    = 2 },
269 	{ PATH_REC_FIELD(rate),
270 	  .offset_words = 13,
271 	  .offset_bits  = 26,
272 	  .size_bits    = 6 },
273 	{ PATH_REC_FIELD(packet_life_time_selector),
274 	  .offset_words = 14,
275 	  .offset_bits  = 0,
276 	  .size_bits    = 2 },
277 	{ PATH_REC_FIELD(packet_life_time),
278 	  .offset_words = 14,
279 	  .offset_bits  = 2,
280 	  .size_bits    = 6 },
281 	{ PATH_REC_FIELD(preference),
282 	  .offset_words = 14,
283 	  .offset_bits  = 8,
284 	  .size_bits    = 8 },
285 	{ RESERVED,
286 	  .offset_words = 14,
287 	  .offset_bits  = 16,
288 	  .size_bits    = 48 },
289 };
290 
291 #define OPA_PATH_REC_FIELD(field) \
292 	.struct_offset_bytes = \
293 		offsetof(struct sa_path_rec, field), \
294 	.struct_size_bytes   = \
295 		sizeof_field(struct sa_path_rec, field),	\
296 	.field_name          = "sa_path_rec:" #field
297 
298 static const struct ib_field opa_path_rec_table[] = {
299 	{ OPA_PATH_REC_FIELD(service_id),
300 	  .offset_words = 0,
301 	  .offset_bits  = 0,
302 	  .size_bits    = 64 },
303 	{ OPA_PATH_REC_FIELD(dgid),
304 	  .offset_words = 2,
305 	  .offset_bits  = 0,
306 	  .size_bits    = 128 },
307 	{ OPA_PATH_REC_FIELD(sgid),
308 	  .offset_words = 6,
309 	  .offset_bits  = 0,
310 	  .size_bits    = 128 },
311 	{ OPA_PATH_REC_FIELD(opa.dlid),
312 	  .offset_words = 10,
313 	  .offset_bits  = 0,
314 	  .size_bits    = 32 },
315 	{ OPA_PATH_REC_FIELD(opa.slid),
316 	  .offset_words = 11,
317 	  .offset_bits  = 0,
318 	  .size_bits    = 32 },
319 	{ OPA_PATH_REC_FIELD(opa.raw_traffic),
320 	  .offset_words = 12,
321 	  .offset_bits  = 0,
322 	  .size_bits    = 1 },
323 	{ RESERVED,
324 	  .offset_words = 12,
325 	  .offset_bits  = 1,
326 	  .size_bits    = 3 },
327 	{ OPA_PATH_REC_FIELD(flow_label),
328 	  .offset_words = 12,
329 	  .offset_bits  = 4,
330 	  .size_bits    = 20 },
331 	{ OPA_PATH_REC_FIELD(hop_limit),
332 	  .offset_words = 12,
333 	  .offset_bits  = 24,
334 	  .size_bits    = 8 },
335 	{ OPA_PATH_REC_FIELD(traffic_class),
336 	  .offset_words = 13,
337 	  .offset_bits  = 0,
338 	  .size_bits    = 8 },
339 	{ OPA_PATH_REC_FIELD(reversible),
340 	  .offset_words = 13,
341 	  .offset_bits  = 8,
342 	  .size_bits    = 1 },
343 	{ OPA_PATH_REC_FIELD(numb_path),
344 	  .offset_words = 13,
345 	  .offset_bits  = 9,
346 	  .size_bits    = 7 },
347 	{ OPA_PATH_REC_FIELD(pkey),
348 	  .offset_words = 13,
349 	  .offset_bits  = 16,
350 	  .size_bits    = 16 },
351 	{ OPA_PATH_REC_FIELD(opa.l2_8B),
352 	  .offset_words = 14,
353 	  .offset_bits  = 0,
354 	  .size_bits    = 1 },
355 	{ OPA_PATH_REC_FIELD(opa.l2_10B),
356 	  .offset_words = 14,
357 	  .offset_bits  = 1,
358 	  .size_bits    = 1 },
359 	{ OPA_PATH_REC_FIELD(opa.l2_9B),
360 	  .offset_words = 14,
361 	  .offset_bits  = 2,
362 	  .size_bits    = 1 },
363 	{ OPA_PATH_REC_FIELD(opa.l2_16B),
364 	  .offset_words = 14,
365 	  .offset_bits  = 3,
366 	  .size_bits    = 1 },
367 	{ RESERVED,
368 	  .offset_words = 14,
369 	  .offset_bits  = 4,
370 	  .size_bits    = 2 },
371 	{ OPA_PATH_REC_FIELD(opa.qos_type),
372 	  .offset_words = 14,
373 	  .offset_bits  = 6,
374 	  .size_bits    = 2 },
375 	{ OPA_PATH_REC_FIELD(opa.qos_priority),
376 	  .offset_words = 14,
377 	  .offset_bits  = 8,
378 	  .size_bits    = 8 },
379 	{ RESERVED,
380 	  .offset_words = 14,
381 	  .offset_bits  = 16,
382 	  .size_bits    = 3 },
383 	{ OPA_PATH_REC_FIELD(sl),
384 	  .offset_words = 14,
385 	  .offset_bits  = 19,
386 	  .size_bits    = 5 },
387 	{ RESERVED,
388 	  .offset_words = 14,
389 	  .offset_bits  = 24,
390 	  .size_bits    = 8 },
391 	{ OPA_PATH_REC_FIELD(mtu_selector),
392 	  .offset_words = 15,
393 	  .offset_bits  = 0,
394 	  .size_bits    = 2 },
395 	{ OPA_PATH_REC_FIELD(mtu),
396 	  .offset_words = 15,
397 	  .offset_bits  = 2,
398 	  .size_bits    = 6 },
399 	{ OPA_PATH_REC_FIELD(rate_selector),
400 	  .offset_words = 15,
401 	  .offset_bits  = 8,
402 	  .size_bits    = 2 },
403 	{ OPA_PATH_REC_FIELD(rate),
404 	  .offset_words = 15,
405 	  .offset_bits  = 10,
406 	  .size_bits    = 6 },
407 	{ OPA_PATH_REC_FIELD(packet_life_time_selector),
408 	  .offset_words = 15,
409 	  .offset_bits  = 16,
410 	  .size_bits    = 2 },
411 	{ OPA_PATH_REC_FIELD(packet_life_time),
412 	  .offset_words = 15,
413 	  .offset_bits  = 18,
414 	  .size_bits    = 6 },
415 	{ OPA_PATH_REC_FIELD(preference),
416 	  .offset_words = 15,
417 	  .offset_bits  = 24,
418 	  .size_bits    = 8 },
419 };
420 
421 #define MCMEMBER_REC_FIELD(field) \
422 	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
423 	.struct_size_bytes   = sizeof_field(struct ib_sa_mcmember_rec, field),	\
424 	.field_name          = "sa_mcmember_rec:" #field
425 
426 static const struct ib_field mcmember_rec_table[] = {
427 	{ MCMEMBER_REC_FIELD(mgid),
428 	  .offset_words = 0,
429 	  .offset_bits  = 0,
430 	  .size_bits    = 128 },
431 	{ MCMEMBER_REC_FIELD(port_gid),
432 	  .offset_words = 4,
433 	  .offset_bits  = 0,
434 	  .size_bits    = 128 },
435 	{ MCMEMBER_REC_FIELD(qkey),
436 	  .offset_words = 8,
437 	  .offset_bits  = 0,
438 	  .size_bits    = 32 },
439 	{ MCMEMBER_REC_FIELD(mlid),
440 	  .offset_words = 9,
441 	  .offset_bits  = 0,
442 	  .size_bits    = 16 },
443 	{ MCMEMBER_REC_FIELD(mtu_selector),
444 	  .offset_words = 9,
445 	  .offset_bits  = 16,
446 	  .size_bits    = 2 },
447 	{ MCMEMBER_REC_FIELD(mtu),
448 	  .offset_words = 9,
449 	  .offset_bits  = 18,
450 	  .size_bits    = 6 },
451 	{ MCMEMBER_REC_FIELD(traffic_class),
452 	  .offset_words = 9,
453 	  .offset_bits  = 24,
454 	  .size_bits    = 8 },
455 	{ MCMEMBER_REC_FIELD(pkey),
456 	  .offset_words = 10,
457 	  .offset_bits  = 0,
458 	  .size_bits    = 16 },
459 	{ MCMEMBER_REC_FIELD(rate_selector),
460 	  .offset_words = 10,
461 	  .offset_bits  = 16,
462 	  .size_bits    = 2 },
463 	{ MCMEMBER_REC_FIELD(rate),
464 	  .offset_words = 10,
465 	  .offset_bits  = 18,
466 	  .size_bits    = 6 },
467 	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
468 	  .offset_words = 10,
469 	  .offset_bits  = 24,
470 	  .size_bits    = 2 },
471 	{ MCMEMBER_REC_FIELD(packet_life_time),
472 	  .offset_words = 10,
473 	  .offset_bits  = 26,
474 	  .size_bits    = 6 },
475 	{ MCMEMBER_REC_FIELD(sl),
476 	  .offset_words = 11,
477 	  .offset_bits  = 0,
478 	  .size_bits    = 4 },
479 	{ MCMEMBER_REC_FIELD(flow_label),
480 	  .offset_words = 11,
481 	  .offset_bits  = 4,
482 	  .size_bits    = 20 },
483 	{ MCMEMBER_REC_FIELD(hop_limit),
484 	  .offset_words = 11,
485 	  .offset_bits  = 24,
486 	  .size_bits    = 8 },
487 	{ MCMEMBER_REC_FIELD(scope),
488 	  .offset_words = 12,
489 	  .offset_bits  = 0,
490 	  .size_bits    = 4 },
491 	{ MCMEMBER_REC_FIELD(join_state),
492 	  .offset_words = 12,
493 	  .offset_bits  = 4,
494 	  .size_bits    = 4 },
495 	{ MCMEMBER_REC_FIELD(proxy_join),
496 	  .offset_words = 12,
497 	  .offset_bits  = 8,
498 	  .size_bits    = 1 },
499 	{ RESERVED,
500 	  .offset_words = 12,
501 	  .offset_bits  = 9,
502 	  .size_bits    = 23 },
503 };
504 
505 #define SERVICE_REC_FIELD(field) \
506 	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
507 	.struct_size_bytes   = sizeof_field(struct ib_sa_service_rec, field),	\
508 	.field_name          = "sa_service_rec:" #field
509 
510 static const struct ib_field service_rec_table[] = {
511 	{ SERVICE_REC_FIELD(id),
512 	  .offset_words = 0,
513 	  .offset_bits  = 0,
514 	  .size_bits    = 64 },
515 	{ SERVICE_REC_FIELD(gid),
516 	  .offset_words = 2,
517 	  .offset_bits  = 0,
518 	  .size_bits    = 128 },
519 	{ SERVICE_REC_FIELD(pkey),
520 	  .offset_words = 6,
521 	  .offset_bits  = 0,
522 	  .size_bits    = 16 },
523 	{ SERVICE_REC_FIELD(lease),
524 	  .offset_words = 7,
525 	  .offset_bits  = 0,
526 	  .size_bits    = 32 },
527 	{ SERVICE_REC_FIELD(key),
528 	  .offset_words = 8,
529 	  .offset_bits  = 0,
530 	  .size_bits    = 128 },
531 	{ SERVICE_REC_FIELD(name),
532 	  .offset_words = 12,
533 	  .offset_bits  = 0,
534 	  .size_bits    = 64*8 },
535 	{ SERVICE_REC_FIELD(data8),
536 	  .offset_words = 28,
537 	  .offset_bits  = 0,
538 	  .size_bits    = 16*8 },
539 	{ SERVICE_REC_FIELD(data16),
540 	  .offset_words = 32,
541 	  .offset_bits  = 0,
542 	  .size_bits    = 8*16 },
543 	{ SERVICE_REC_FIELD(data32),
544 	  .offset_words = 36,
545 	  .offset_bits  = 0,
546 	  .size_bits    = 4*32 },
547 	{ SERVICE_REC_FIELD(data64),
548 	  .offset_words = 40,
549 	  .offset_bits  = 0,
550 	  .size_bits    = 2*64 },
551 };
552 
553 #define CLASSPORTINFO_REC_FIELD(field) \
554 	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
555 	.struct_size_bytes   = sizeof_field(struct ib_class_port_info, field),	\
556 	.field_name          = "ib_class_port_info:" #field
557 
558 static const struct ib_field ib_classport_info_rec_table[] = {
559 	{ CLASSPORTINFO_REC_FIELD(base_version),
560 	  .offset_words = 0,
561 	  .offset_bits  = 0,
562 	  .size_bits    = 8 },
563 	{ CLASSPORTINFO_REC_FIELD(class_version),
564 	  .offset_words = 0,
565 	  .offset_bits  = 8,
566 	  .size_bits    = 8 },
567 	{ CLASSPORTINFO_REC_FIELD(capability_mask),
568 	  .offset_words = 0,
569 	  .offset_bits  = 16,
570 	  .size_bits    = 16 },
571 	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
572 	  .offset_words = 1,
573 	  .offset_bits  = 0,
574 	  .size_bits    = 32 },
575 	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
576 	  .offset_words = 2,
577 	  .offset_bits  = 0,
578 	  .size_bits    = 128 },
579 	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
580 	  .offset_words = 6,
581 	  .offset_bits  = 0,
582 	  .size_bits    = 32 },
583 	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
584 	  .offset_words = 7,
585 	  .offset_bits  = 0,
586 	  .size_bits    = 16 },
587 	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
588 	  .offset_words = 7,
589 	  .offset_bits  = 16,
590 	  .size_bits    = 16 },
591 
592 	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
593 	  .offset_words = 8,
594 	  .offset_bits  = 0,
595 	  .size_bits    = 32 },
596 	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
597 	  .offset_words = 9,
598 	  .offset_bits  = 0,
599 	  .size_bits    = 32 },
600 
601 	{ CLASSPORTINFO_REC_FIELD(trap_gid),
602 	  .offset_words = 10,
603 	  .offset_bits  = 0,
604 	  .size_bits    = 128 },
605 	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
606 	  .offset_words = 14,
607 	  .offset_bits  = 0,
608 	  .size_bits    = 32 },
609 
610 	{ CLASSPORTINFO_REC_FIELD(trap_lid),
611 	  .offset_words = 15,
612 	  .offset_bits  = 0,
613 	  .size_bits    = 16 },
614 	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
615 	  .offset_words = 15,
616 	  .offset_bits  = 16,
617 	  .size_bits    = 16 },
618 
619 	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
620 	  .offset_words = 16,
621 	  .offset_bits  = 0,
622 	  .size_bits    = 32 },
623 	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
624 	  .offset_words = 17,
625 	  .offset_bits  = 0,
626 	  .size_bits    = 32 },
627 };
628 
629 #define OPA_CLASSPORTINFO_REC_FIELD(field) \
630 	.struct_offset_bytes =\
631 		offsetof(struct opa_class_port_info, field),	\
632 	.struct_size_bytes   = \
633 		sizeof_field(struct opa_class_port_info, field),	\
634 	.field_name          = "opa_class_port_info:" #field
635 
636 static const struct ib_field opa_classport_info_rec_table[] = {
637 	{ OPA_CLASSPORTINFO_REC_FIELD(base_version),
638 	  .offset_words = 0,
639 	  .offset_bits  = 0,
640 	  .size_bits    = 8 },
641 	{ OPA_CLASSPORTINFO_REC_FIELD(class_version),
642 	  .offset_words = 0,
643 	  .offset_bits  = 8,
644 	  .size_bits    = 8 },
645 	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
646 	  .offset_words = 0,
647 	  .offset_bits  = 16,
648 	  .size_bits    = 16 },
649 	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
650 	  .offset_words = 1,
651 	  .offset_bits  = 0,
652 	  .size_bits    = 32 },
653 	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
654 	  .offset_words = 2,
655 	  .offset_bits  = 0,
656 	  .size_bits    = 128 },
657 	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
658 	  .offset_words = 6,
659 	  .offset_bits  = 0,
660 	  .size_bits    = 32 },
661 	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
662 	  .offset_words = 7,
663 	  .offset_bits  = 0,
664 	  .size_bits    = 32 },
665 	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
666 	  .offset_words = 8,
667 	  .offset_bits  = 0,
668 	  .size_bits    = 32 },
669 	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
670 	  .offset_words = 9,
671 	  .offset_bits  = 0,
672 	  .size_bits    = 32 },
673 	{ OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
674 	  .offset_words = 10,
675 	  .offset_bits  = 0,
676 	  .size_bits    = 128 },
677 	{ OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
678 	  .offset_words = 14,
679 	  .offset_bits  = 0,
680 	  .size_bits    = 32 },
681 	{ OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
682 	  .offset_words = 15,
683 	  .offset_bits  = 0,
684 	  .size_bits    = 32 },
685 	{ OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
686 	  .offset_words = 16,
687 	  .offset_bits  = 0,
688 	  .size_bits    = 32 },
689 	{ OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
690 	  .offset_words = 17,
691 	  .offset_bits  = 0,
692 	  .size_bits    = 32 },
693 	{ OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
694 	  .offset_words = 18,
695 	  .offset_bits  = 0,
696 	  .size_bits    = 16 },
697 	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
698 	  .offset_words = 18,
699 	  .offset_bits  = 16,
700 	  .size_bits    = 16 },
701 	{ OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
702 	  .offset_words = 19,
703 	  .offset_bits  = 0,
704 	  .size_bits    = 8 },
705 	{ RESERVED,
706 	  .offset_words = 19,
707 	  .offset_bits  = 8,
708 	  .size_bits    = 24 },
709 };
710 
711 #define GUIDINFO_REC_FIELD(field) \
712 	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
713 	.struct_size_bytes   = sizeof_field(struct ib_sa_guidinfo_rec, field),	\
714 	.field_name          = "sa_guidinfo_rec:" #field
715 
716 static const struct ib_field guidinfo_rec_table[] = {
717 	{ GUIDINFO_REC_FIELD(lid),
718 	  .offset_words = 0,
719 	  .offset_bits  = 0,
720 	  .size_bits    = 16 },
721 	{ GUIDINFO_REC_FIELD(block_num),
722 	  .offset_words = 0,
723 	  .offset_bits  = 16,
724 	  .size_bits    = 8 },
725 	{ GUIDINFO_REC_FIELD(res1),
726 	  .offset_words = 0,
727 	  .offset_bits  = 24,
728 	  .size_bits    = 8 },
729 	{ GUIDINFO_REC_FIELD(res2),
730 	  .offset_words = 1,
731 	  .offset_bits  = 0,
732 	  .size_bits    = 32 },
733 	{ GUIDINFO_REC_FIELD(guid_info_list),
734 	  .offset_words = 2,
735 	  .offset_bits  = 0,
736 	  .size_bits    = 512 },
737 };
738 
739 static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
740 {
741 	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
742 }
743 
744 static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
745 {
746 	return (query->flags & IB_SA_CANCEL);
747 }
748 
749 static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
750 				     struct ib_sa_query *query)
751 {
752 	struct sa_path_rec *sa_rec = query->mad_buf->context[1];
753 	struct ib_sa_mad *mad = query->mad_buf->mad;
754 	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
755 	u16 val16;
756 	u64 val64;
757 	struct rdma_ls_resolve_header *header;
758 
759 	query->mad_buf->context[1] = NULL;
760 
761 	/* Construct the family header first */
762 	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
763 	memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
764 	       LS_DEVICE_NAME_MAX);
765 	header->port_num = query->port->port_num;
766 
767 	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
768 	    sa_rec->reversible != 0)
769 		query->path_use = LS_RESOLVE_PATH_USE_GMP;
770 	else
771 		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
772 	header->path_use = query->path_use;
773 
774 	/* Now build the attributes */
775 	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
776 		val64 = be64_to_cpu(sa_rec->service_id);
777 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
778 			sizeof(val64), &val64);
779 	}
780 	if (comp_mask & IB_SA_PATH_REC_DGID)
781 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
782 			sizeof(sa_rec->dgid), &sa_rec->dgid);
783 	if (comp_mask & IB_SA_PATH_REC_SGID)
784 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
785 			sizeof(sa_rec->sgid), &sa_rec->sgid);
786 	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
787 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
788 			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);
789 
790 	if (comp_mask & IB_SA_PATH_REC_PKEY) {
791 		val16 = be16_to_cpu(sa_rec->pkey);
792 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
793 			sizeof(val16), &val16);
794 	}
795 	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
796 		val16 = be16_to_cpu(sa_rec->qos_class);
797 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
798 			sizeof(val16), &val16);
799 	}
800 }
801 
802 static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
803 {
804 	int len = 0;
805 
806 	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
807 		len += nla_total_size(sizeof(u64));
808 	if (comp_mask & IB_SA_PATH_REC_DGID)
809 		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
810 	if (comp_mask & IB_SA_PATH_REC_SGID)
811 		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
812 	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
813 		len += nla_total_size(sizeof(u8));
814 	if (comp_mask & IB_SA_PATH_REC_PKEY)
815 		len += nla_total_size(sizeof(u16));
816 	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
817 		len += nla_total_size(sizeof(u16));
818 
819 	/*
820 	 * Make sure that at least some of the required comp_mask bits are
821 	 * set.
822 	 */
823 	if (WARN_ON(len == 0))
824 		return len;
825 
826 	/* Add the family header */
827 	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));
828 
829 	return len;
830 }
831 
832 static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
833 {
834 	struct sk_buff *skb = NULL;
835 	struct nlmsghdr *nlh;
836 	void *data;
837 	struct ib_sa_mad *mad;
838 	int len;
839 	unsigned long flags;
840 	unsigned long delay;
841 	gfp_t gfp_flag;
842 	int ret;
843 
844 	INIT_LIST_HEAD(&query->list);
845 	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
846 
847 	mad = query->mad_buf->mad;
848 	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
849 	if (len <= 0)
850 		return -EMSGSIZE;
851 
852 	skb = nlmsg_new(len, gfp_mask);
853 	if (!skb)
854 		return -ENOMEM;
855 
856 	/* Put nlmsg header only for now */
857 	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
858 			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
859 	if (!data) {
860 		nlmsg_free(skb);
861 		return -EMSGSIZE;
862 	}
863 
864 	/* Add attributes */
865 	ib_nl_set_path_rec_attrs(skb, query);
866 
867 	/* Repair the nlmsg header length */
868 	nlmsg_end(skb, nlh);
869 
870 	gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
871 		GFP_NOWAIT;
872 
873 	spin_lock_irqsave(&ib_nl_request_lock, flags);
874 	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
875 
876 	if (ret)
877 		goto out;
878 
879 	/* Put the request on the list. */
880 	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
881 	query->timeout = delay + jiffies;
882 	list_add_tail(&query->list, &ib_nl_request_list);
883 	/* Start the timeout if this is the only request */
884 	if (ib_nl_request_list.next == &query->list)
885 		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
886 
887 out:
888 	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
889 
890 	return ret;
891 }
892 
893 static int ib_nl_cancel_request(struct ib_sa_query *query)
894 {
895 	unsigned long flags;
896 	struct ib_sa_query *wait_query;
897 	int found = 0;
898 
899 	spin_lock_irqsave(&ib_nl_request_lock, flags);
900 	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
901 		/* Let the timeout routine take care of the callback */
902 		if (query == wait_query) {
903 			query->flags |= IB_SA_CANCEL;
904 			query->timeout = jiffies;
905 			list_move(&query->list, &ib_nl_request_list);
906 			found = 1;
907 			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
908 			break;
909 		}
910 	}
911 	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
912 
913 	return found;
914 }
915 
916 static void send_handler(struct ib_mad_agent *agent,
917 			 struct ib_mad_send_wc *mad_send_wc);
918 
919 static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
920 					   const struct nlmsghdr *nlh)
921 {
922 	struct ib_mad_send_wc mad_send_wc;
923 	struct ib_sa_mad *mad = NULL;
924 	const struct nlattr *head, *curr;
925 	struct ib_path_rec_data  *rec;
926 	int len, rem;
927 	u32 mask = 0;
928 	int status = -EIO;
929 
930 	if (query->callback) {
931 		head = (const struct nlattr *) nlmsg_data(nlh);
932 		len = nlmsg_len(nlh);
933 		switch (query->path_use) {
934 		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
935 			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
936 			break;
937 
938 		case LS_RESOLVE_PATH_USE_ALL:
939 		case LS_RESOLVE_PATH_USE_GMP:
940 		default:
941 			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
942 				IB_PATH_BIDIRECTIONAL;
943 			break;
944 		}
945 		nla_for_each_attr(curr, head, len, rem) {
946 			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
947 				rec = nla_data(curr);
948 				/*
949 				 * Get the first one. In the future, we may
950 				 * need to get up to 6 pathrecords.
951 				 */
952 				if ((rec->flags & mask) == mask) {
953 					mad = query->mad_buf->mad;
954 					mad->mad_hdr.method |=
955 						IB_MGMT_METHOD_RESP;
956 					memcpy(mad->data, rec->path_rec,
957 					       sizeof(rec->path_rec));
958 					status = 0;
959 					break;
960 				}
961 			}
962 		}
963 		query->callback(query, status, mad);
964 	}
965 
966 	mad_send_wc.send_buf = query->mad_buf;
967 	mad_send_wc.status = IB_WC_SUCCESS;
968 	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
969 }
970 
971 static void ib_nl_request_timeout(struct work_struct *work)
972 {
973 	unsigned long flags;
974 	struct ib_sa_query *query;
975 	unsigned long delay;
976 	struct ib_mad_send_wc mad_send_wc;
977 	int ret;
978 
979 	spin_lock_irqsave(&ib_nl_request_lock, flags);
980 	while (!list_empty(&ib_nl_request_list)) {
981 		query = list_entry(ib_nl_request_list.next,
982 				   struct ib_sa_query, list);
983 
984 		if (time_after(query->timeout, jiffies)) {
985 			delay = query->timeout - jiffies;
986 			if ((long)delay <= 0)
987 				delay = 1;
988 			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
989 			break;
990 		}
991 
992 		list_del(&query->list);
993 		ib_sa_disable_local_svc(query);
994 		/* Hold the lock to protect against query cancellation */
995 		if (ib_sa_query_cancelled(query))
996 			ret = -1;
997 		else
998 			ret = ib_post_send_mad(query->mad_buf, NULL);
999 		if (ret) {
1000 			mad_send_wc.send_buf = query->mad_buf;
1001 			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
1002 			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1003 			send_handler(query->port->agent, &mad_send_wc);
1004 			spin_lock_irqsave(&ib_nl_request_lock, flags);
1005 		}
1006 	}
1007 	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1008 }
1009 
1010 int ib_nl_handle_set_timeout(struct sk_buff *skb,
1011 			     struct nlmsghdr *nlh,
1012 			     struct netlink_ext_ack *extack)
1013 {
1014 	int timeout, delta, abs_delta;
1015 	const struct nlattr *attr;
1016 	unsigned long flags;
1017 	struct ib_sa_query *query;
1018 	long delay = 0;
1019 	struct nlattr *tb[LS_NLA_TYPE_MAX];
1020 	int ret;
1021 
1022 	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
1023 	    !(NETLINK_CB(skb).sk))
1024 		return -EPERM;
1025 
1026 	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
1027 				   nlmsg_len(nlh), ib_nl_policy, NULL);
1028 	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
1029 	if (ret || !attr)
1030 		goto settimeout_out;
1031 
1032 	timeout = *(int *) nla_data(attr);
1033 	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
1034 		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
1035 	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
1036 		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
1037 
1038 	delta = timeout - sa_local_svc_timeout_ms;
1039 	if (delta < 0)
1040 		abs_delta = -delta;
1041 	else
1042 		abs_delta = delta;
1043 
1044 	if (delta != 0) {
1045 		spin_lock_irqsave(&ib_nl_request_lock, flags);
1046 		sa_local_svc_timeout_ms = timeout;
1047 		list_for_each_entry(query, &ib_nl_request_list, list) {
1048 			if (delta < 0 && abs_delta > query->timeout)
1049 				query->timeout = 0;
1050 			else
1051 				query->timeout += delta;
1052 
1053 			/* Get the new delay from the first entry */
1054 			if (!delay) {
1055 				delay = query->timeout - jiffies;
1056 				if (delay <= 0)
1057 					delay = 1;
1058 			}
1059 		}
1060 		if (delay)
1061 			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
1062 					 (unsigned long)delay);
1063 		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1064 	}
1065 
1066 settimeout_out:
1067 	return 0;
1068 }
1069 
1070 static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
1071 {
1072 	struct nlattr *tb[LS_NLA_TYPE_MAX];
1073 	int ret;
1074 
1075 	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
1076 		return 0;
1077 
1078 	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
1079 				   nlmsg_len(nlh), ib_nl_policy, NULL);
1080 	if (ret)
1081 		return 0;
1082 
1083 	return 1;
1084 }
1085 
1086 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
1087 			      struct nlmsghdr *nlh,
1088 			      struct netlink_ext_ack *extack)
1089 {
1090 	unsigned long flags;
1091 	struct ib_sa_query *query;
1092 	struct ib_mad_send_buf *send_buf;
1093 	struct ib_mad_send_wc mad_send_wc;
1094 	int found = 0;
1095 	int ret;
1096 
1097 	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
1098 	    !(NETLINK_CB(skb).sk))
1099 		return -EPERM;
1100 
1101 	spin_lock_irqsave(&ib_nl_request_lock, flags);
1102 	list_for_each_entry(query, &ib_nl_request_list, list) {
1103 		/*
1104 		 * If the query is cancelled, let the timeout routine
1105 		 * take care of it.
1106 		 */
1107 		if (nlh->nlmsg_seq == query->seq) {
1108 			found = !ib_sa_query_cancelled(query);
1109 			if (found)
1110 				list_del(&query->list);
1111 			break;
1112 		}
1113 	}
1114 
1115 	if (!found) {
1116 		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1117 		goto resp_out;
1118 	}
1119 
1120 	send_buf = query->mad_buf;
1121 
1122 	if (!ib_nl_is_good_resolve_resp(nlh)) {
1123 		/* if the result is a failure, send out the packet via IB */
1124 		ib_sa_disable_local_svc(query);
1125 		ret = ib_post_send_mad(query->mad_buf, NULL);
1126 		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1127 		if (ret) {
1128 			mad_send_wc.send_buf = send_buf;
1129 			mad_send_wc.status = IB_WC_GENERAL_ERR;
1130 			send_handler(query->port->agent, &mad_send_wc);
1131 		}
1132 	} else {
1133 		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1134 		ib_nl_process_good_resolve_rsp(query, nlh);
1135 	}
1136 
1137 resp_out:
1138 	return 0;
1139 }
1140 
1141 static void free_sm_ah(struct kref *kref)
1142 {
1143 	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
1144 
1145 	rdma_destroy_ah(sm_ah->ah, 0);
1146 	kfree(sm_ah);
1147 }
1148 
1149 void ib_sa_register_client(struct ib_sa_client *client)
1150 {
1151 	atomic_set(&client->users, 1);
1152 	init_completion(&client->comp);
1153 }
1154 EXPORT_SYMBOL(ib_sa_register_client);
1155 
1156 void ib_sa_unregister_client(struct ib_sa_client *client)
1157 {
1158 	ib_sa_client_put(client);
1159 	wait_for_completion(&client->comp);
1160 }
1161 EXPORT_SYMBOL(ib_sa_unregister_client);
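
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * an SA consumer embeds a struct ib_sa_client, registers it once, and
 * unregisters it on teardown; ib_sa_unregister_client() waits until
 * all queries issued on behalf of the client have released it.
 *
 *	static struct ib_sa_client my_sa_client;
 *
 *	static int __init my_init(void)
 *	{
 *		ib_sa_register_client(&my_sa_client);
 *		return 0;
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		ib_sa_unregister_client(&my_sa_client);
 *	}
 */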
1162 
1163 /**
1164  * ib_sa_cancel_query - try to cancel an SA query
1165  * @id:ID of query to cancel
1166  * @query:query pointer to cancel
1167  *
1168  * Try to cancel an SA query.  If the id and query don't match up or
1169  * the query has already completed, nothing is done.  Otherwise the
1170  * query is canceled and will complete with a status of -EINTR.
1171  */
1172 void ib_sa_cancel_query(int id, struct ib_sa_query *query)
1173 {
1174 	unsigned long flags;
1175 	struct ib_mad_agent *agent;
1176 	struct ib_mad_send_buf *mad_buf;
1177 
1178 	xa_lock_irqsave(&queries, flags);
1179 	if (xa_load(&queries, id) != query) {
1180 		xa_unlock_irqrestore(&queries, flags);
1181 		return;
1182 	}
1183 	agent = query->port->agent;
1184 	mad_buf = query->mad_buf;
1185 	xa_unlock_irqrestore(&queries, flags);
1186 
1187 	/*
1188 	 * If the query is still on the netlink request list, schedule
1189 	 * it to be cancelled by the timeout routine. Otherwise, it has been
1190 	 * sent to the MAD layer and has to be cancelled from there.
1191 	 */
1192 	if (!ib_nl_cancel_request(query))
1193 		ib_cancel_mad(agent, mad_buf);
1194 }
1195 EXPORT_SYMBOL(ib_sa_cancel_query);
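
/*
 * Cancellation sketch (hypothetical caller): the id returned by one of
 * the ib_sa_*_query() routines, together with the query pointer handed
 * back through the **sa_query argument, identifies the query to abort.
 * The callback still runs, with status -EINTR.
 *
 *	id = ib_sa_path_rec_get(..., &my_query);
 *	if (id >= 0 && need_to_abort)
 *		ib_sa_cancel_query(id, my_query);
 */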
1196 
1197 static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
1198 {
1199 	struct ib_sa_device *sa_dev;
1200 	struct ib_sa_port   *port;
1201 	unsigned long flags;
1202 	u8 src_path_mask;
1203 
1204 	sa_dev = ib_get_client_data(device, &sa_client);
1205 	if (!sa_dev)
1206 		return 0x7f;
1207 
1208 	port  = &sa_dev->port[port_num - sa_dev->start_port];
1209 	spin_lock_irqsave(&port->ah_lock, flags);
1210 	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
1211 	spin_unlock_irqrestore(&port->ah_lock, flags);
1212 
1213 	return src_path_mask;
1214 }
1215 
1216 static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
1217 				   struct sa_path_rec *rec,
1218 				   struct rdma_ah_attr *ah_attr,
1219 				   const struct ib_gid_attr *gid_attr)
1220 {
1221 	enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);
1222 
1223 	if (!gid_attr) {
1224 		gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
1225 						 port_num, NULL);
1226 		if (IS_ERR(gid_attr))
1227 			return PTR_ERR(gid_attr);
1228 	} else
1229 		rdma_hold_gid_attr(gid_attr);
1230 
1231 	rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
1232 				be32_to_cpu(rec->flow_label),
1233 				rec->hop_limit,	rec->traffic_class,
1234 				gid_attr);
1235 	return 0;
1236 }
1237 
1238 /**
1239  * ib_init_ah_attr_from_path - Initialize address handle attributes based on
1240  *   an SA path record.
1241  * @device: Device associated with the ah attribute initialization.
1242  * @port_num: Port on the specified device.
1243  * @rec: path record entry to use for ah attributes initialization.
1244  * @ah_attr: address handle attributes to initialize from the path record.
1245  * @gid_attr: SGID attribute to consider during initialization.
1246  *
1247  * When ib_init_ah_attr_from_path() returns success:
1248  * (a) for the IB link layer, ah_attr optionally holds a reference to the
1249  * SGID attribute when a GRH is present;
1250  * (b) for the RoCE link layer, ah_attr holds a reference to the SGID attribute.
1251  * User must invoke rdma_destroy_ah_attr() to release reference to SGID
1252  * attributes which are initialized using ib_init_ah_attr_from_path().
1253  */
1254 int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
1255 			      struct sa_path_rec *rec,
1256 			      struct rdma_ah_attr *ah_attr,
1257 			      const struct ib_gid_attr *gid_attr)
1258 {
1259 	int ret = 0;
1260 
1261 	memset(ah_attr, 0, sizeof(*ah_attr));
1262 	ah_attr->type = rdma_ah_find_type(device, port_num);
1263 	rdma_ah_set_sl(ah_attr, rec->sl);
1264 	rdma_ah_set_port_num(ah_attr, port_num);
1265 	rdma_ah_set_static_rate(ah_attr, rec->rate);
1266 
1267 	if (sa_path_is_roce(rec)) {
1268 		ret = roce_resolve_route_from_path(rec, gid_attr);
1269 		if (ret)
1270 			return ret;
1271 
1272 		memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
1273 	} else {
1274 		rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
1275 		if (sa_path_is_opa(rec) &&
1276 		    rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
1277 			rdma_ah_set_make_grd(ah_attr, true);
1278 
1279 		rdma_ah_set_path_bits(ah_attr,
1280 				      be32_to_cpu(sa_path_get_slid(rec)) &
1281 				      get_src_path_mask(device, port_num));
1282 	}
1283 
1284 	if (rec->hop_limit > 0 || sa_path_is_roce(rec))
1285 		ret = init_ah_attr_grh_fields(device, port_num,
1286 					      rec, ah_attr, gid_attr);
1287 	return ret;
1288 }
1289 EXPORT_SYMBOL(ib_init_ah_attr_from_path);
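
/*
 * Call-pattern sketch (hypothetical caller): on success, any SGID
 * attribute reference taken here must be dropped with
 * rdma_destroy_ah_attr() once ah_attr is no longer needed.
 *
 *	struct rdma_ah_attr ah_attr;
 *	int ret;
 *
 *	ret = ib_init_ah_attr_from_path(device, port_num, rec,
 *					&ah_attr, NULL);
 *	if (ret)
 *		return ret;
 *	... create an AH or modify a QP with ah_attr ...
 *	rdma_destroy_ah_attr(&ah_attr);
 */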
1290 
1291 static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
1292 {
1293 	struct rdma_ah_attr ah_attr;
1294 	unsigned long flags;
1295 
1296 	spin_lock_irqsave(&query->port->ah_lock, flags);
1297 	if (!query->port->sm_ah) {
1298 		spin_unlock_irqrestore(&query->port->ah_lock, flags);
1299 		return -EAGAIN;
1300 	}
1301 	kref_get(&query->port->sm_ah->ref);
1302 	query->sm_ah = query->port->sm_ah;
1303 	spin_unlock_irqrestore(&query->port->ah_lock, flags);
1304 
1305 	/*
1306 	 * Always check that the sm_ah has a valid dlid assigned
1307 	 * before querying for class port info.
1308 	 */
1309 	if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
1310 	    !rdma_is_valid_unicast_lid(&ah_attr)) {
1311 		kref_put(&query->sm_ah->ref, free_sm_ah);
1312 		return -EAGAIN;
1313 	}
1314 	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
1315 					    query->sm_ah->pkey_index,
1316 					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
1317 					    gfp_mask,
1318 					    ((query->flags & IB_SA_QUERY_OPA) ?
1319 					     OPA_MGMT_BASE_VERSION :
1320 					     IB_MGMT_BASE_VERSION));
1321 	if (IS_ERR(query->mad_buf)) {
1322 		kref_put(&query->sm_ah->ref, free_sm_ah);
1323 		return -ENOMEM;
1324 	}
1325 
1326 	query->mad_buf->ah = query->sm_ah->ah;
1327 
1328 	return 0;
1329 }
1330 
1331 static void free_mad(struct ib_sa_query *query)
1332 {
1333 	ib_free_send_mad(query->mad_buf);
1334 	kref_put(&query->sm_ah->ref, free_sm_ah);
1335 }
1336 
1337 static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
1338 {
1339 	struct ib_sa_mad *mad = query->mad_buf->mad;
1340 	unsigned long flags;
1341 
1342 	memset(mad, 0, sizeof *mad);
1343 
1344 	if (query->flags & IB_SA_QUERY_OPA) {
1345 		mad->mad_hdr.base_version  = OPA_MGMT_BASE_VERSION;
1346 		mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
1347 	} else {
1348 		mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
1349 		mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
1350 	}
1351 	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
1352 	spin_lock_irqsave(&tid_lock, flags);
1353 	mad->mad_hdr.tid           =
1354 		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
1355 	spin_unlock_irqrestore(&tid_lock, flags);
1356 }
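
/*
 * Worked example of the TID layout above: with agent->hi_tid ==
 * 0x12345678 and a local counter value of 9, the MAD carries the
 * big-endian 64-bit TID 0x1234567800000009, i.e. the agent's hi_tid
 * in the upper 32 bits and the tid_lock-protected counter in the
 * lower 32 bits.
 */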
1357 
1358 static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
1359 		    gfp_t gfp_mask)
1360 {
1361 	unsigned long flags;
1362 	int ret, id;
1363 
1364 	xa_lock_irqsave(&queries, flags);
1365 	ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
1366 	xa_unlock_irqrestore(&queries, flags);
1367 	if (ret < 0)
1368 		return ret;
1369 
1370 	query->mad_buf->timeout_ms  = timeout_ms;
1371 	query->mad_buf->context[0] = query;
1372 	query->id = id;
1373 
1374 	if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
1375 	    (!(query->flags & IB_SA_QUERY_OPA))) {
1376 		if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
1377 			if (!ib_nl_make_request(query, gfp_mask))
1378 				return id;
1379 		}
1380 		ib_sa_disable_local_svc(query);
1381 	}
1382 
1383 	ret = ib_post_send_mad(query->mad_buf, NULL);
1384 	if (ret) {
1385 		xa_lock_irqsave(&queries, flags);
1386 		__xa_erase(&queries, id);
1387 		xa_unlock_irqrestore(&queries, flags);
1388 	}
1389 
1390 	/*
1391 	 * It's not safe to dereference query any more, because the
1392 	 * send may already have completed and freed the query in
1393 	 * another context.
1394 	 */
1395 	return ret ? ret : id;
1396 }
1397 
1398 void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
1399 {
1400 	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
1401 }
1402 EXPORT_SYMBOL(ib_sa_unpack_path);
1403 
1404 void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
1405 {
1406 	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
1407 }
1408 EXPORT_SYMBOL(ib_sa_pack_path);
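
/*
 * Round-trip sketch (hypothetical buffer): ib_sa_pack_path() and
 * ib_sa_unpack_path() convert between the host struct sa_path_rec and
 * the wire layout described by path_rec_table; IB_MGMT_SA_DATA bytes
 * is large enough to hold the packed attribute.
 *
 *	u8 wire[IB_MGMT_SA_DATA];
 *	struct sa_path_rec out;
 *
 *	ib_sa_pack_path(rec, wire);
 *	ib_sa_unpack_path(wire, &out);
 *
 * out now holds the fields of *rec covered by path_rec_table.
 */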
1409 
1410 static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
1411 					 struct ib_sa_device *sa_dev,
1412 					 u8 port_num)
1413 {
1414 	struct ib_sa_port *port;
1415 	unsigned long flags;
1416 	bool ret = false;
1417 
1418 	port = &sa_dev->port[port_num - sa_dev->start_port];
1419 	spin_lock_irqsave(&port->classport_lock, flags);
1420 	if (!port->classport_info.valid)
1421 		goto ret;
1422 
1423 	if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
1424 		ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
1425 			OPA_CLASS_PORT_INFO_PR_SUPPORT;
1426 ret:
1427 	spin_unlock_irqrestore(&port->classport_lock, flags);
1428 	return ret;
1429 }
1430 
1431 enum opa_pr_supported {
1432 	PR_NOT_SUPPORTED,
1433 	PR_OPA_SUPPORTED,
1434 	PR_IB_SUPPORTED
1435 };
1436 
1437 /**
1438  * opa_pr_query_possible - Check if the current PR query can be an OPA query.
1439  * Returns PR_NOT_SUPPORTED if a path record query is not
1440  * possible, PR_OPA_SUPPORTED if an OPA path record query
1441  * is possible and PR_IB_SUPPORTED if an IB path record
1442  * query is possible.
1443  */
1444 static int opa_pr_query_possible(struct ib_sa_client *client,
1445 				 struct ib_sa_device *sa_dev,
1446 				 struct ib_device *device, u8 port_num,
1447 				 struct sa_path_rec *rec)
1448 {
1449 	struct ib_port_attr port_attr;
1450 
1451 	if (ib_query_port(device, port_num, &port_attr))
1452 		return PR_NOT_SUPPORTED;
1453 
1454 	if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num))
1455 		return PR_OPA_SUPPORTED;
1456 
1457 	if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
1458 		return PR_NOT_SUPPORTED;
1459 	else
1460 		return PR_IB_SUPPORTED;
1461 }
1462 
1463 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
1464 				    int status,
1465 				    struct ib_sa_mad *mad)
1466 {
1467 	struct ib_sa_path_query *query =
1468 		container_of(sa_query, struct ib_sa_path_query, sa_query);
1469 
1470 	if (mad) {
1471 		struct sa_path_rec rec;
1472 
1473 		if (sa_query->flags & IB_SA_QUERY_OPA) {
1474 			ib_unpack(opa_path_rec_table,
1475 				  ARRAY_SIZE(opa_path_rec_table),
1476 				  mad->data, &rec);
1477 			rec.rec_type = SA_PATH_REC_TYPE_OPA;
1478 			query->callback(status, &rec, query->context);
1479 		} else {
1480 			ib_unpack(path_rec_table,
1481 				  ARRAY_SIZE(path_rec_table),
1482 				  mad->data, &rec);
1483 			rec.rec_type = SA_PATH_REC_TYPE_IB;
1484 			sa_path_set_dmac_zero(&rec);
1485 
1486 			if (query->conv_pr) {
1487 				struct sa_path_rec opa;
1488 
1489 				memset(&opa, 0, sizeof(struct sa_path_rec));
1490 				sa_convert_path_ib_to_opa(&opa, &rec);
1491 				query->callback(status, &opa, query->context);
1492 			} else {
1493 				query->callback(status, &rec, query->context);
1494 			}
1495 		}
1496 	} else
1497 		query->callback(status, NULL, query->context);
1498 }
1499 
1500 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
1501 {
1502 	struct ib_sa_path_query *query =
1503 		container_of(sa_query, struct ib_sa_path_query, sa_query);
1504 
1505 	kfree(query->conv_pr);
1506 	kfree(query);
1507 }
1508 
1509 /**
1510  * ib_sa_path_rec_get - Start a Path get query
1511  * @client:SA client
1512  * @device:device to send query on
1513  * @port_num: port number to send query on
1514  * @rec:Path Record to send in query
1515  * @comp_mask:component mask to send in query
1516  * @timeout_ms:time to wait for response
1517  * @gfp_mask:GFP mask to use for internal allocations
1518  * @callback:function called when query completes, times out or is
1519  * canceled
1520  * @context:opaque user context passed to callback
1521  * @sa_query:query context, used to cancel query
1522  *
1523  * Send a Path Record Get query to the SA to look up a path.  The
1524  * callback function will be called when the query completes (or
1525  * fails); status is 0 for a successful response, -EINTR if the query
1526  * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
1527  * occurred sending the query.  The resp parameter of the callback is
1528  * only valid if status is 0.
1529  *
1530  * If the return value of ib_sa_path_rec_get() is negative, it is an
1531  * error code.  Otherwise it is a query ID that can be used to cancel
1532  * the query.
1533  */
1534 int ib_sa_path_rec_get(struct ib_sa_client *client,
1535 		       struct ib_device *device, u8 port_num,
1536 		       struct sa_path_rec *rec,
1537 		       ib_sa_comp_mask comp_mask,
1538 		       unsigned long timeout_ms, gfp_t gfp_mask,
1539 		       void (*callback)(int status,
1540 					struct sa_path_rec *resp,
1541 					void *context),
1542 		       void *context,
1543 		       struct ib_sa_query **sa_query)
1544 {
1545 	struct ib_sa_path_query *query;
1546 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1547 	struct ib_sa_port   *port;
1548 	struct ib_mad_agent *agent;
1549 	struct ib_sa_mad *mad;
1550 	enum opa_pr_supported status;
1551 	int ret;
1552 
1553 	if (!sa_dev)
1554 		return -ENODEV;
1555 
1556 	if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
1557 	    (rec->rec_type != SA_PATH_REC_TYPE_OPA))
1558 		return -EINVAL;
1559 
1560 	port  = &sa_dev->port[port_num - sa_dev->start_port];
1561 	agent = port->agent;
1562 
1563 	query = kzalloc(sizeof(*query), gfp_mask);
1564 	if (!query)
1565 		return -ENOMEM;
1566 
1567 	query->sa_query.port     = port;
1568 	if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
1569 		status = opa_pr_query_possible(client, sa_dev, device, port_num,
1570 					       rec);
1571 		if (status == PR_NOT_SUPPORTED) {
1572 			ret = -EINVAL;
1573 			goto err1;
1574 		} else if (status == PR_OPA_SUPPORTED) {
1575 			query->sa_query.flags |= IB_SA_QUERY_OPA;
1576 		} else {
1577 			query->conv_pr =
1578 				kmalloc(sizeof(*query->conv_pr), gfp_mask);
1579 			if (!query->conv_pr) {
1580 				ret = -ENOMEM;
1581 				goto err1;
1582 			}
1583 		}
1584 	}
1585 
1586 	ret = alloc_mad(&query->sa_query, gfp_mask);
1587 	if (ret)
1588 		goto err2;
1589 
1590 	ib_sa_client_get(client);
1591 	query->sa_query.client = client;
1592 	query->callback        = callback;
1593 	query->context         = context;
1594 
1595 	mad = query->sa_query.mad_buf->mad;
1596 	init_mad(&query->sa_query, agent);
1597 
1598 	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
1599 	query->sa_query.release  = ib_sa_path_rec_release;
1600 	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
1601 	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
1602 	mad->sa_hdr.comp_mask	 = comp_mask;
1603 
1604 	if (query->sa_query.flags & IB_SA_QUERY_OPA) {
1605 		ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
1606 			rec, mad->data);
1607 	} else if (query->conv_pr) {
1608 		sa_convert_path_opa_to_ib(query->conv_pr, rec);
1609 		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
1610 			query->conv_pr, mad->data);
1611 	} else {
1612 		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
1613 			rec, mad->data);
1614 	}
1615 
1616 	*sa_query = &query->sa_query;
1617 
1618 	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
1619 	query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
1620 						query->conv_pr : rec;
1621 
1622 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1623 	if (ret < 0)
1624 		goto err3;
1625 
1626 	return ret;
1627 
1628 err3:
1629 	*sa_query = NULL;
1630 	ib_sa_client_put(query->sa_query.client);
1631 	free_mad(&query->sa_query);
1632 err2:
1633 	kfree(query->conv_pr);
1634 err1:
1635 	kfree(query);
1636 	return ret;
1637 }
1638 EXPORT_SYMBOL(ib_sa_path_rec_get);
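
/*
 * Caller sketch (hypothetical names): issue a GET keyed on source and
 * destination GID plus a component mask, then consume the result in
 * the callback; a negative return is an error, otherwise it is the
 * query id usable with ib_sa_cancel_query().
 *
 *	static void my_path_cb(int status, struct sa_path_rec *resp,
 *			       void *context)
 *	{
 *		if (!status)
 *			... resp is valid here ...
 *	}
 *
 *	id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				1000, GFP_KERNEL, my_path_cb, my_ctx,
 *				&my_query);
 */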
1639 
1640 static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
1641 				    int status,
1642 				    struct ib_sa_mad *mad)
1643 {
1644 	struct ib_sa_service_query *query =
1645 		container_of(sa_query, struct ib_sa_service_query, sa_query);
1646 
1647 	if (mad) {
1648 		struct ib_sa_service_rec rec;
1649 
1650 		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
1651 			  mad->data, &rec);
1652 		query->callback(status, &rec, query->context);
1653 	} else
1654 		query->callback(status, NULL, query->context);
1655 }
1656 
1657 static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
1658 {
1659 	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
1660 }
1661 
1662 /**
1663  * ib_sa_service_rec_query - Start Service Record operation
1664  * @client:SA client
1665  * @device:device to send request on
1666  * @port_num: port number to send request on
1667  * @method:SA method - should be IB_MGMT_METHOD_GET, IB_MGMT_METHOD_SET or IB_SA_METHOD_DELETE
1668  * @rec:Service Record to send in request
1669  * @comp_mask:component mask to send in request
1670  * @timeout_ms:time to wait for response
1671  * @gfp_mask:GFP mask to use for internal allocations
1672  * @callback:function called when request completes, times out or is
1673  * canceled
1674  * @context:opaque user context passed to callback
1675  * @sa_query:request context, used to cancel request
1676  *
1677  * Send a Service Record set/get/delete to the SA to register,
1678  * unregister or query a service record.
1679  * The callback function will be called when the request completes (or
1680  * fails); status is 0 for a successful response, -EINTR if the query
1681  * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
1682  * occurred sending the query.  The resp parameter of the callback is
1683  * only valid if status is 0.
1684  *
1685  * If the return value of ib_sa_service_rec_query() is negative, it is an
1686  * error code.  Otherwise it is a request ID that can be used to cancel
1687  * the query.
1688  */
1689 int ib_sa_service_rec_query(struct ib_sa_client *client,
1690 			    struct ib_device *device, u8 port_num, u8 method,
1691 			    struct ib_sa_service_rec *rec,
1692 			    ib_sa_comp_mask comp_mask,
1693 			    unsigned long timeout_ms, gfp_t gfp_mask,
1694 			    void (*callback)(int status,
1695 					     struct ib_sa_service_rec *resp,
1696 					     void *context),
1697 			    void *context,
1698 			    struct ib_sa_query **sa_query)
1699 {
1700 	struct ib_sa_service_query *query;
1701 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1702 	struct ib_sa_port   *port;
1703 	struct ib_mad_agent *agent;
1704 	struct ib_sa_mad *mad;
1705 	int ret;
1706 
1707 	if (!sa_dev)
1708 		return -ENODEV;
1709 
1710 	port  = &sa_dev->port[port_num - sa_dev->start_port];
1711 	agent = port->agent;
1712 
1713 	if (method != IB_MGMT_METHOD_GET &&
1714 	    method != IB_MGMT_METHOD_SET &&
1715 	    method != IB_SA_METHOD_DELETE)
1716 		return -EINVAL;
1717 
1718 	query = kzalloc(sizeof(*query), gfp_mask);
1719 	if (!query)
1720 		return -ENOMEM;
1721 
1722 	query->sa_query.port     = port;
1723 	ret = alloc_mad(&query->sa_query, gfp_mask);
1724 	if (ret)
1725 		goto err1;
1726 
1727 	ib_sa_client_get(client);
1728 	query->sa_query.client = client;
1729 	query->callback        = callback;
1730 	query->context         = context;
1731 
1732 	mad = query->sa_query.mad_buf->mad;
1733 	init_mad(&query->sa_query, agent);
1734 
1735 	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
1736 	query->sa_query.release  = ib_sa_service_rec_release;
1737 	mad->mad_hdr.method	 = method;
1738 	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
1739 	mad->sa_hdr.comp_mask	 = comp_mask;
1740 
1741 	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
1742 		rec, mad->data);
1743 
1744 	*sa_query = &query->sa_query;
1745 
1746 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1747 	if (ret < 0)
1748 		goto err2;
1749 
1750 	return ret;
1751 
1752 err2:
1753 	*sa_query = NULL;
1754 	ib_sa_client_put(query->sa_query.client);
1755 	free_mad(&query->sa_query);
1756 
1757 err1:
1758 	kfree(query);
1759 	return ret;
1760 }
1761 EXPORT_SYMBOL(ib_sa_service_rec_query);
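
/*
 * Registration sketch (hypothetical caller): an IB_MGMT_METHOD_SET
 * registers the service record with the SA; a later IB_SA_METHOD_DELETE
 * with the same key fields unregisters it. The component-mask names
 * below are assumed from <rdma/ib_sa.h>.
 *
 *	id = ib_sa_service_rec_query(&my_sa_client, device, port_num,
 *				     IB_MGMT_METHOD_SET, &svc_rec,
 *				     IB_SA_SERVICE_REC_SERVICE_ID |
 *				     IB_SA_SERVICE_REC_SERVICE_GID,
 *				     1000, GFP_KERNEL, my_svc_cb,
 *				     my_ctx, &my_query);
 */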
1762 
1763 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
1764 					int status,
1765 					struct ib_sa_mad *mad)
1766 {
1767 	struct ib_sa_mcmember_query *query =
1768 		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
1769 
1770 	if (mad) {
1771 		struct ib_sa_mcmember_rec rec;
1772 
1773 		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1774 			  mad->data, &rec);
1775 		query->callback(status, &rec, query->context);
1776 	} else
1777 		query->callback(status, NULL, query->context);
1778 }
1779 
1780 static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
1781 {
1782 	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
1783 }
1784 
1785 int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
1786 			     struct ib_device *device, u8 port_num,
1787 			     u8 method,
1788 			     struct ib_sa_mcmember_rec *rec,
1789 			     ib_sa_comp_mask comp_mask,
1790 			     unsigned long timeout_ms, gfp_t gfp_mask,
1791 			     void (*callback)(int status,
1792 					      struct ib_sa_mcmember_rec *resp,
1793 					      void *context),
1794 			     void *context,
1795 			     struct ib_sa_query **sa_query)
1796 {
1797 	struct ib_sa_mcmember_query *query;
1798 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1799 	struct ib_sa_port   *port;
1800 	struct ib_mad_agent *agent;
1801 	struct ib_sa_mad *mad;
1802 	int ret;
1803 
1804 	if (!sa_dev)
1805 		return -ENODEV;
1806 
1807 	port  = &sa_dev->port[port_num - sa_dev->start_port];
1808 	agent = port->agent;
1809 
1810 	query = kzalloc(sizeof(*query), gfp_mask);
1811 	if (!query)
1812 		return -ENOMEM;
1813 
1814 	query->sa_query.port     = port;
1815 	ret = alloc_mad(&query->sa_query, gfp_mask);
1816 	if (ret)
1817 		goto err1;
1818 
1819 	ib_sa_client_get(client);
1820 	query->sa_query.client = client;
1821 	query->callback        = callback;
1822 	query->context         = context;
1823 
1824 	mad = query->sa_query.mad_buf->mad;
1825 	init_mad(&query->sa_query, agent);
1826 
1827 	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
1828 	query->sa_query.release  = ib_sa_mcmember_rec_release;
1829 	mad->mad_hdr.method	 = method;
1830 	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
1831 	mad->sa_hdr.comp_mask	 = comp_mask;
1832 
1833 	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1834 		rec, mad->data);
1835 
1836 	*sa_query = &query->sa_query;
1837 
1838 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1839 	if (ret < 0)
1840 		goto err2;
1841 
1842 	return ret;
1843 
1844 err2:
1845 	*sa_query = NULL;
1846 	ib_sa_client_put(query->sa_query.client);
1847 	free_mad(&query->sa_query);
1848 
1849 err1:
1850 	kfree(query);
1851 	return ret;
1852 }
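/*
 * Note: unlike the exported record queries above and below,
 * ib_sa_mcmember_rec_query() carries no EXPORT_SYMBOL.  Its prototype
 * lives in the core-private "sa.h" header included at the top of this
 * file, and it is consumed by the in-tree multicast join/leave machinery
 * (initialized via mcast_init() in ib_sa_init() below) rather than by
 * external modules.
 */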
1853 
1854 /* Support GuidInfoRecord */
1855 static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
1856 					int status,
1857 					struct ib_sa_mad *mad)
1858 {
1859 	struct ib_sa_guidinfo_query *query =
1860 		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
1861 
1862 	if (mad) {
1863 		struct ib_sa_guidinfo_rec rec;
1864 
1865 		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
1866 			  mad->data, &rec);
1867 		query->callback(status, &rec, query->context);
1868 	} else
1869 		query->callback(status, NULL, query->context);
1870 }
1871 
1872 static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
1873 {
1874 	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
1875 }
1876 
1877 int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
1878 			      struct ib_device *device, u8 port_num,
1879 			      struct ib_sa_guidinfo_rec *rec,
1880 			      ib_sa_comp_mask comp_mask, u8 method,
1881 			      unsigned long timeout_ms, gfp_t gfp_mask,
1882 			      void (*callback)(int status,
1883 					       struct ib_sa_guidinfo_rec *resp,
1884 					       void *context),
1885 			      void *context,
1886 			      struct ib_sa_query **sa_query)
1887 {
1888 	struct ib_sa_guidinfo_query *query;
1889 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1890 	struct ib_sa_port *port;
1891 	struct ib_mad_agent *agent;
1892 	struct ib_sa_mad *mad;
1893 	int ret;
1894 
1895 	if (!sa_dev)
1896 		return -ENODEV;
1897 
1898 	if (method != IB_MGMT_METHOD_GET &&
1899 	    method != IB_MGMT_METHOD_SET &&
1900 	    method != IB_SA_METHOD_DELETE) {
1901 		return -EINVAL;
1902 	}
1903 
1904 	port  = &sa_dev->port[port_num - sa_dev->start_port];
1905 	agent = port->agent;
1906 
1907 	query = kzalloc(sizeof(*query), gfp_mask);
1908 	if (!query)
1909 		return -ENOMEM;
1910 
1911 	query->sa_query.port = port;
1912 	ret = alloc_mad(&query->sa_query, gfp_mask);
1913 	if (ret)
1914 		goto err1;
1915 
1916 	ib_sa_client_get(client);
1917 	query->sa_query.client = client;
1918 	query->callback        = callback;
1919 	query->context         = context;
1920 
1921 	mad = query->sa_query.mad_buf->mad;
1922 	init_mad(&query->sa_query, agent);
1923 
1924 	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
1925 	query->sa_query.release  = ib_sa_guidinfo_rec_release;
1926 
1927 	mad->mad_hdr.method	 = method;
1928 	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
1929 	mad->sa_hdr.comp_mask	 = comp_mask;
1930 
1931 	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
1932 		mad->data);
1933 
1934 	*sa_query = &query->sa_query;
1935 
1936 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1937 	if (ret < 0)
1938 		goto err2;
1939 
1940 	return ret;
1941 
1942 err2:
1943 	*sa_query = NULL;
1944 	ib_sa_client_put(query->sa_query.client);
1945 	free_mad(&query->sa_query);
1946 
1947 err1:
1948 	kfree(query);
1949 	return ret;
1950 }
1951 EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
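/*
 * Illustrative sketch only, not part of this file: programming the first
 * GUID of block 0 via a GuidInfoRecord Set.  "my_sa_client", "guid_cb",
 * "lid" and "my_guid" are names invented for the example; the comp_mask
 * bits are the IB_SA_GUIDINFO_REC_* definitions from <rdma/ib_sa.h>.
 *
 *	struct ib_sa_guidinfo_rec rec = {};
 *	struct ib_sa_query *query;
 *	__be64 guid = cpu_to_be64(my_guid);
 *	int id;
 *
 *	rec.lid       = cpu_to_be16(lid);
 *	rec.block_num = 0;
 *	memcpy(rec.guid_info_list, &guid, sizeof(guid));
 *
 *	id = ib_sa_guid_info_rec_query(&my_sa_client, device, port_num, &rec,
 *				       IB_SA_GUIDINFO_REC_LID |
 *				       IB_SA_GUIDINFO_REC_BLOCK_NUM |
 *				       IB_SA_GUIDINFO_REC_GID0,
 *				       IB_MGMT_METHOD_SET, 1000, GFP_KERNEL,
 *				       guid_cb, NULL, &query);
 */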
1952 
1953 bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
1954 				    struct ib_device *device,
1955 				    u8 port_num)
1956 {
1957 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1958 	struct ib_sa_port *port;
1959 	bool ret = false;
1960 	unsigned long flags;
1961 
1962 	if (!sa_dev)
1963 		return ret;
1964 
1965 	port  = &sa_dev->port[port_num - sa_dev->start_port];
1966 
1967 	spin_lock_irqsave(&port->classport_lock, flags);
1968 	if ((port->classport_info.valid) &&
1969 	    (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
1970 		ret = ib_get_cpi_capmask2(&port->classport_info.data.ib)
1971 			& IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
1972 	spin_unlock_irqrestore(&port->classport_lock, flags);
1973 	return ret;
1974 }
1975 EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);
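/*
 * Illustrative usage, in the spirit of how the RDMA CM consumes this
 * helper (sketch only; "my_sa_client", "device" and "port_num" are
 * placeholders): pick the MCMemberRecord JoinState before a send-only
 * join, where bit 3 is SendOnlyFullMember and bit 0 is FullMember.
 *
 *	u8 join_state = ib_sa_sendonly_fullmem_support(&my_sa_client,
 *						       device, port_num) ?
 *			BIT(3) : 0x1;
 */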
1976 
1977 struct ib_classport_info_context {
1978 	struct completion	done;
1979 	struct ib_sa_query	*sa_query;
1980 };
1981 
1982 static void ib_classportinfo_cb(void *context)
1983 {
1984 	struct ib_classport_info_context *cb_ctx = context;
1985 
1986 	complete(&cb_ctx->done);
1987 }
1988 
1989 static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
1990 					      int status,
1991 					      struct ib_sa_mad *mad)
1992 {
1993 	unsigned long flags;
1994 	struct ib_sa_classport_info_query *query =
1995 		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
1996 	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;
1997 
1998 	if (mad) {
1999 		if (sa_query->flags & IB_SA_QUERY_OPA) {
2000 			struct opa_class_port_info rec;
2001 
2002 			ib_unpack(opa_classport_info_rec_table,
2003 				  ARRAY_SIZE(opa_classport_info_rec_table),
2004 				  mad->data, &rec);
2005 
2006 			spin_lock_irqsave(&sa_query->port->classport_lock,
2007 					  flags);
2008 			if (!status && !info->valid) {
2009 				memcpy(&info->data.opa, &rec,
2010 				       sizeof(info->data.opa));
2011 
2012 				info->valid = true;
2013 				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
2014 			}
2015 			spin_unlock_irqrestore(&sa_query->port->classport_lock,
2016 					       flags);
2017 
2018 		} else {
2019 			struct ib_class_port_info rec;
2020 
2021 			ib_unpack(ib_classport_info_rec_table,
2022 				  ARRAY_SIZE(ib_classport_info_rec_table),
2023 				  mad->data, &rec);
2024 
2025 			spin_lock_irqsave(&sa_query->port->classport_lock,
2026 					  flags);
2027 			if (!status && !info->valid) {
2028 				memcpy(&info->data.ib, &rec,
2029 				       sizeof(info->data.ib));
2030 
2031 				info->valid = true;
2032 				info->data.type = RDMA_CLASS_PORT_INFO_IB;
2033 			}
2034 			spin_unlock_irqrestore(&sa_query->port->classport_lock,
2035 					       flags);
2036 		}
2037 	}
2038 	query->callback(query->context);
2039 }
2040 
2041 static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
2042 {
2043 	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
2044 			   sa_query));
2045 }
2046 
2047 static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
2048 					  unsigned long timeout_ms,
2049 					  void (*callback)(void *context),
2050 					  void *context,
2051 					  struct ib_sa_query **sa_query)
2052 {
2053 	struct ib_mad_agent *agent;
2054 	struct ib_sa_classport_info_query *query;
2055 	struct ib_sa_mad *mad;
2056 	gfp_t gfp_mask = GFP_KERNEL;
2057 	int ret;
2058 
2059 	agent = port->agent;
2060 
2061 	query = kzalloc(sizeof(*query), gfp_mask);
2062 	if (!query)
2063 		return -ENOMEM;
2064 
2065 	query->sa_query.port = port;
2066 	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
2067 						 port->port_num) ?
2068 				 IB_SA_QUERY_OPA : 0;
2069 	ret = alloc_mad(&query->sa_query, gfp_mask);
2070 	if (ret)
2071 		goto err_free;
2072 
2073 	query->callback = callback;
2074 	query->context = context;
2075 
2076 	mad = query->sa_query.mad_buf->mad;
2077 	init_mad(&query->sa_query, agent);
2078 
2079 	query->sa_query.callback = ib_sa_classport_info_rec_callback;
2080 	query->sa_query.release  = ib_sa_classport_info_rec_release;
2081 	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
2082 	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
2083 	mad->sa_hdr.comp_mask	 = 0;
2084 	*sa_query = &query->sa_query;
2085 
2086 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
2087 	if (ret < 0)
2088 		goto err_free_mad;
2089 
2090 	return ret;
2091 
2092 err_free_mad:
2093 	*sa_query = NULL;
2094 	free_mad(&query->sa_query);
2095 
2096 err_free:
2097 	kfree(query);
2098 	return ret;
2099 }
2100 
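/*
 * update_ib_cpi() below turns the asynchronous query above into a
 * synchronous one: it passes a completion (wrapped in struct
 * ib_classport_info_context) as the callback context and blocks until
 * ib_classportinfo_cb() signals it:
 *
 *	init_completion(&cb_context->done);
 *	ret = ib_sa_classport_info_rec_query(port, 3000,
 *					     ib_classportinfo_cb, cb_context,
 *					     &cb_context->sa_query);
 *	if (ret >= 0)
 *		wait_for_completion(&cb_context->done);
 */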
2101 static void update_ib_cpi(struct work_struct *work)
2102 {
2103 	struct ib_sa_port *port =
2104 		container_of(work, struct ib_sa_port, ib_cpi_work.work);
2105 	struct ib_classport_info_context *cb_context;
2106 	unsigned long flags;
2107 	int ret;
2108 
2109 	/* If the classport info is already valid, there is
2110 	 * nothing to do here.
2111 	 */
2112 	spin_lock_irqsave(&port->classport_lock, flags);
2113 	if (port->classport_info.valid) {
2114 		spin_unlock_irqrestore(&port->classport_lock, flags);
2115 		return;
2116 	}
2117 	spin_unlock_irqrestore(&port->classport_lock, flags);
2118 
2119 	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
2120 	if (!cb_context)
2121 		goto err_nomem;
2122 
2123 	init_completion(&cb_context->done);
2124 
2125 	ret = ib_sa_classport_info_rec_query(port, 3000,
2126 					     ib_classportinfo_cb, cb_context,
2127 					     &cb_context->sa_query);
2128 	if (ret < 0)
2129 		goto free_cb_err;
2130 	wait_for_completion(&cb_context->done);
2131 free_cb_err:
2132 	kfree(cb_context);
2133 	spin_lock_irqsave(&port->classport_lock, flags);
2134 
2135 	/* If the classport info is still not valid, the query must have
2136 	 * failed for some reason.  Retry issuing the query.
2137 	 */
2138 	if (!port->classport_info.valid) {
2139 		port->classport_info.retry_cnt++;
2140 		if (port->classport_info.retry_cnt <=
2141 		    IB_SA_CPI_MAX_RETRY_CNT) {
2142 			unsigned long delay =
2143 				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
2144 
2145 			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
2146 		}
2147 	}
2148 	spin_unlock_irqrestore(&port->classport_lock, flags);
2149 
2150 err_nomem:
2151 	return;
2152 }
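/*
 * Worst-case retry budget of the function above, from the constants at
 * the top of this file: one initial attempt plus IB_SA_CPI_MAX_RETRY_CNT
 * (3) retries, each retry delayed by IB_SA_CPI_RETRY_WAIT (1000 ms) and
 * each attempt bounded by the 3000 ms MAD timeout, i.e. roughly
 * 4 * 3000 + 3 * 1000 = 15000 ms before caching the classport info for
 * this port is given up on (ignoring any netlink-assisted timeout
 * extension).
 */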
2153 
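/*
 * send_handler() below is the single send-side completion point for every
 * query issued from this file and implements the status contract
 * documented on the exported entry points: IB_WC_WR_FLUSH_ERR (a canceled
 * query) surfaces as -EINTR, IB_WC_RESP_TIMEOUT_ERR as -ETIMEDOUT, and
 * any other failure as -EIO.  A successful status invokes no callback
 * here because recv_handler() has already delivered the response.
 */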
2154 static void send_handler(struct ib_mad_agent *agent,
2155 			 struct ib_mad_send_wc *mad_send_wc)
2156 {
2157 	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
2158 	unsigned long flags;
2159 
2160 	if (query->callback)
2161 		switch (mad_send_wc->status) {
2162 		case IB_WC_SUCCESS:
2163 			/* No callback here -- recv_handler already ran it */
2164 			break;
2165 		case IB_WC_RESP_TIMEOUT_ERR:
2166 			query->callback(query, -ETIMEDOUT, NULL);
2167 			break;
2168 		case IB_WC_WR_FLUSH_ERR:
2169 			query->callback(query, -EINTR, NULL);
2170 			break;
2171 		default:
2172 			query->callback(query, -EIO, NULL);
2173 			break;
2174 		}
2175 
2176 	xa_lock_irqsave(&queries, flags);
2177 	__xa_erase(&queries, query->id);
2178 	xa_unlock_irqrestore(&queries, flags);
2179 
2180 	free_mad(query);
2181 	if (query->client)
2182 		ib_sa_client_put(query->client);
2183 	query->release(query);
2184 }
2185 
2186 static void recv_handler(struct ib_mad_agent *mad_agent,
2187 			 struct ib_mad_send_buf *send_buf,
2188 			 struct ib_mad_recv_wc *mad_recv_wc)
2189 {
2190 	struct ib_sa_query *query;
2191 
2192 	if (!send_buf)
2193 		return;
2194 
2195 	query = send_buf->context[0];
2196 	if (query->callback) {
2197 		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
2198 			query->callback(query,
2199 					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
2200 					-EINVAL : 0,
2201 					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
2202 		else
2203 			query->callback(query, -EIO, NULL);
2204 	}
2205 
2206 	ib_free_recv_mad(mad_recv_wc);
2207 }
2208 
2209 static void update_sm_ah(struct work_struct *work)
2210 {
2211 	struct ib_sa_port *port =
2212 		container_of(work, struct ib_sa_port, update_task);
2213 	struct ib_sa_sm_ah *new_ah;
2214 	struct ib_port_attr port_attr;
2215 	struct rdma_ah_attr   ah_attr;
2216 	bool grh_required;
2217 
2218 	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
2219 		pr_warn("Couldn't query port\n");
2220 		return;
2221 	}
2222 
2223 	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
2224 	if (!new_ah)
2225 		return;
2226 
2227 	kref_init(&new_ah->ref);
2228 	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
2229 
2230 	new_ah->pkey_index = 0;
2231 	if (ib_find_pkey(port->agent->device, port->port_num,
2232 			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
2233 		pr_err("Couldn't find index for default PKey\n");
2234 
2235 	memset(&ah_attr, 0, sizeof(ah_attr));
2236 	ah_attr.type = rdma_ah_find_type(port->agent->device,
2237 					 port->port_num);
2238 	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
2239 	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
2240 	rdma_ah_set_port_num(&ah_attr, port->port_num);
2241 
2242 	grh_required = rdma_is_grh_required(port->agent->device,
2243 					    port->port_num);
2244 
2245 	/*
2246 	 * The OPA sm_lid of 0xFFFF needs special handling so that it can be
2247 	 * differentiated from a permissive LID of 0xFFFF.  We set the
2248 	 * make_grd flag here so the DGID in the address handle can be
2249 	 * programmed appropriately.
2250 	 */
2251 	if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
2252 	    (grh_required ||
2253 	     port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
2254 		rdma_ah_set_make_grd(&ah_attr, true);
2255 
2256 	if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
2257 		rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
2258 		rdma_ah_set_subnet_prefix(&ah_attr,
2259 					  cpu_to_be64(port_attr.subnet_prefix));
2260 		rdma_ah_set_interface_id(&ah_attr,
2261 					 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
2262 	}
2263 
2264 	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr,
2265 				    RDMA_CREATE_AH_SLEEPABLE);
2266 	if (IS_ERR(new_ah->ah)) {
2267 		pr_warn("Couldn't create new SM AH\n");
2268 		kfree(new_ah);
2269 		return;
2270 	}
2271 
2272 	spin_lock_irq(&port->ah_lock);
2273 	if (port->sm_ah)
2274 		kref_put(&port->sm_ah->ref, free_sm_ah);
2275 	port->sm_ah = new_ah;
2276 	spin_unlock_irq(&port->ah_lock);
2277 }
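/*
 * The sm_ah cached above is consumed under the same ah_lock/kref
 * discipline by alloc_mad() earlier in this file, in the spirit of:
 *
 *	spin_lock_irqsave(&port->ah_lock, flags);
 *	if (!port->sm_ah) {
 *		spin_unlock_irqrestore(&port->ah_lock, flags);
 *		return -EAGAIN;
 *	}
 *	kref_get(&port->sm_ah->ref);
 *	sm_ah = port->sm_ah;
 *	spin_unlock_irqrestore(&port->ah_lock, flags);
 *	...
 *	kref_put(&sm_ah->ref, free_sm_ah);
 */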
2278 
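/*
 * In the handler below, any of the listed events can stale the cached SM
 * address handle, so it is dropped unconditionally and rebuilt by
 * update_task.  For the events that can also change the SA itself, the
 * classport info cache is invalidated and re-queried after an
 * IB_SA_CPI_RETRY_WAIT delay, presumably to give the fabric time to
 * settle.
 */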
2279 static void ib_sa_event(struct ib_event_handler *handler,
2280 			struct ib_event *event)
2281 {
2282 	if (event->event == IB_EVENT_PORT_ERR    ||
2283 	    event->event == IB_EVENT_PORT_ACTIVE ||
2284 	    event->event == IB_EVENT_LID_CHANGE  ||
2285 	    event->event == IB_EVENT_PKEY_CHANGE ||
2286 	    event->event == IB_EVENT_SM_CHANGE   ||
2287 	    event->event == IB_EVENT_CLIENT_REREGISTER) {
2288 		unsigned long flags;
2289 		struct ib_sa_device *sa_dev =
2290 			container_of(handler, typeof(*sa_dev), event_handler);
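		/* port_num here is the zero-based index into sa_dev->port[] */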
2291 		u8 port_num = event->element.port_num - sa_dev->start_port;
2292 		struct ib_sa_port *port = &sa_dev->port[port_num];
2293 
2294 		if (!rdma_cap_ib_sa(handler->device, port->port_num))
2295 			return;
2296 
2297 		spin_lock_irqsave(&port->ah_lock, flags);
2298 		if (port->sm_ah)
2299 			kref_put(&port->sm_ah->ref, free_sm_ah);
2300 		port->sm_ah = NULL;
2301 		spin_unlock_irqrestore(&port->ah_lock, flags);
2302 
2303 		if (event->event == IB_EVENT_SM_CHANGE ||
2304 		    event->event == IB_EVENT_CLIENT_REREGISTER ||
2305 		    event->event == IB_EVENT_LID_CHANGE ||
2306 		    event->event == IB_EVENT_PORT_ACTIVE) {
2307 			unsigned long delay =
2308 				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
2309 
2310 			spin_lock_irqsave(&port->classport_lock, flags);
2311 			port->classport_info.valid = false;
2312 			port->classport_info.retry_cnt = 0;
2313 			spin_unlock_irqrestore(&port->classport_lock, flags);
2314 			queue_delayed_work(ib_wq,
2315 					   &port->ib_cpi_work, delay);
2316 		}
2317 		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
2318 	}
2319 }
2320 
2321 static int ib_sa_add_one(struct ib_device *device)
2322 {
2323 	struct ib_sa_device *sa_dev;
2324 	int s, e, i;
2325 	int count = 0;
2326 	int ret;
2327 
2328 	s = rdma_start_port(device);
2329 	e = rdma_end_port(device);
2330 
2331 	sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
2332 	if (!sa_dev)
2333 		return -ENOMEM;
2334 
2335 	sa_dev->start_port = s;
2336 	sa_dev->end_port   = e;
2337 
2338 	for (i = 0; i <= e - s; ++i) {
2339 		spin_lock_init(&sa_dev->port[i].ah_lock);
2340 		if (!rdma_cap_ib_sa(device, i + 1))
2341 			continue;
2342 
2343 		sa_dev->port[i].sm_ah    = NULL;
2344 		sa_dev->port[i].port_num = i + s;
2345 
2346 		spin_lock_init(&sa_dev->port[i].classport_lock);
2347 		sa_dev->port[i].classport_info.valid = false;
2348 
2349 		sa_dev->port[i].agent =
2350 			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
2351 					      NULL, 0, send_handler,
2352 					      recv_handler, sa_dev, 0);
2353 		if (IS_ERR(sa_dev->port[i].agent)) {
2354 			ret = PTR_ERR(sa_dev->port[i].agent);
2355 			goto err;
2356 		}
2357 
2358 		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
2359 		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
2360 				  update_ib_cpi);
2361 
2362 		count++;
2363 	}
2364 
2365 	if (!count) {
2366 		ret = -EOPNOTSUPP;
2367 		goto free;
2368 	}
2369 
2370 	ib_set_client_data(device, &sa_client, sa_dev);
2371 
2372 	/*
2373 	 * We register our event handler after everything is set up,
2374 	 * and then update our cached info after the event handler is
2375 	 * registered to avoid any problems if a port changes state
2376 	 * during our initialization.
2377 	 */
2378 
2379 	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
2380 	ib_register_event_handler(&sa_dev->event_handler);
2381 
2382 	for (i = 0; i <= e - s; ++i) {
2383 		if (rdma_cap_ib_sa(device, i + 1))
2384 			update_sm_ah(&sa_dev->port[i].update_task);
2385 	}
2386 
2387 	return 0;
2388 
2389 err:
2390 	while (--i >= 0) {
2391 		if (rdma_cap_ib_sa(device, i + 1))
2392 			ib_unregister_mad_agent(sa_dev->port[i].agent);
2393 	}
2394 free:
2395 	kfree(sa_dev);
2396 	return ret;
2397 }
2398 
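/*
 * Teardown mirrors setup in reverse: unregister the event handler first
 * so no new work is queued, flush ib_wq so in-flight update_sm_ah() and
 * classport work has finished, then for each SA-capable port cancel the
 * delayed classport info work, unregister the MAD agent (which waits out
 * outstanding MADs), and finally drop the last reference on any cached SM
 * address handle.
 */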
2399 static void ib_sa_remove_one(struct ib_device *device, void *client_data)
2400 {
2401 	struct ib_sa_device *sa_dev = client_data;
2402 	int i;
2403 
2404 	ib_unregister_event_handler(&sa_dev->event_handler);
2405 	flush_workqueue(ib_wq);
2406 
2407 	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
2408 		if (rdma_cap_ib_sa(device, i + 1)) {
2409 			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
2410 			ib_unregister_mad_agent(sa_dev->port[i].agent);
2411 			if (sa_dev->port[i].sm_ah)
2412 				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
2413 		}
2415 	}
2416 
2417 	kfree(sa_dev);
2418 }
2419 
2420 int ib_sa_init(void)
2421 {
2422 	int ret;
2423 
2424 	get_random_bytes(&tid, sizeof tid);
2425 
2426 	atomic_set(&ib_nl_sa_request_seq, 0);
2427 
2428 	ret = ib_register_client(&sa_client);
2429 	if (ret) {
2430 		pr_err("Couldn't register ib_sa client\n");
2431 		goto err1;
2432 	}
2433 
2434 	ret = mcast_init();
2435 	if (ret) {
2436 		pr_err("Couldn't initialize multicast handling\n");
2437 		goto err2;
2438 	}
2439 
2440 	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
2441 	if (!ib_nl_wq) {
2442 		ret = -ENOMEM;
2443 		goto err3;
2444 	}
2445 
2446 	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);
2447 
2448 	return 0;
2449 
2450 err3:
2451 	mcast_cleanup();
2452 err2:
2453 	ib_unregister_client(&sa_client);
2454 err1:
2455 	return ret;
2456 }
2457 
2458 void ib_sa_cleanup(void)
2459 {
2460 	cancel_delayed_work(&ib_nl_timed_work);
2461 	flush_workqueue(ib_nl_wq);
2462 	destroy_workqueue(ib_nl_wq);
2463 	mcast_cleanup();
2464 	ib_unregister_client(&sa_client);
2465 	WARN_ON(!xa_empty(&queries));
2466 }
2467