/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include <rdma/opa_addr.h>
#include <rdma/rdma_cm.h>
#include "sa.h"
#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
#define IB_SA_CPI_MAX_RETRY_CNT			3
#define IB_SA_CPI_RETRY_WAIT			1000 /* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah *ah;
	struct kref ref;
	u16 pkey_index;
	u8 src_path_mask;
};

enum rdma_class_port_info_type {
	RDMA_CLASS_PORT_INFO_IB,
	RDMA_CLASS_PORT_INFO_OPA
};

struct rdma_class_port_info {
	enum rdma_class_port_info_type type;
	union {
		struct ib_class_port_info ib;
		struct opa_class_port_info opa;
	};
};

struct ib_sa_classport_cache {
	bool valid;
	int retry_cnt;
	struct rdma_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah *sm_ah;
	struct work_struct update_task;
	struct ib_sa_classport_cache classport_info;
	struct delayed_work ib_cpi_work;
	spinlock_t classport_lock; /* protects class port info set */
	spinlock_t ah_lock;
	u32 port_num;
};

struct ib_sa_device {
	int start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *sa_query, int status,
			 struct ib_sa_mad *mad);
	void (*rmpp_callback)(struct ib_sa_query *sa_query, int status,
			      struct ib_mad_recv_wc *mad);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client *client;
	struct ib_sa_port *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah *sm_ah;
	int id;
	u32 flags;
	struct list_head list; /* Local svc request list */
	u32 seq; /* Local svc request sequence number */
	unsigned long timeout; /* Local svc timeout */
	u8 path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
#define IB_SA_QUERY_OPA			0x00000004
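
/*
 * Descriptive note on the query flags above: IB_SA_ENABLE_LOCAL_SERVICE
 * lets a query first try resolution through the local service (netlink)
 * path before falling back to a MAD sent to the SA; IB_SA_CANCEL marks a
 * queued netlink request so the timeout routine completes it with -EINTR;
 * IB_SA_QUERY_OPA selects the OPA MAD format and field tables.
 */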

struct ib_sa_path_query {
	void (*callback)(int status, struct sa_path_rec *rec,
			 unsigned int num_paths, void *context);
	void *context;
	struct ib_sa_query sa_query;
	struct sa_path_rec *conv_pr;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_service_query {
	void (*callback)(int status, struct sa_service_rec *rec,
			 unsigned int num_services, void *context);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};


static int ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name	= "sa",
	.add	= ib_sa_add_one,
	.remove	= ib_sa_remove_one
};

static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct sa_path_rec, field),	\
	.struct_size_bytes = sizeof_field(struct sa_path_rec, field),	\
	.field_name = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ PATH_REC_FIELD(ib.dlid),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ PATH_REC_FIELD(ib.slid),
	  .offset_words = 10,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ PATH_REC_FIELD(ib.raw_traffic),
	  .offset_words = 11,
	  .offset_bits = 0,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits = 1,
	  .size_bits = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits = 4,
	  .size_bits = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits = 8,
	  .size_bits = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits = 9,
	  .size_bits = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits = 0,
	  .size_bits = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits = 12,
	  .size_bits = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits = 24,
	  .size_bits = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits = 26,
	  .size_bits = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits = 0,
	  .size_bits = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits = 2,
	  .size_bits = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits = 8,
	  .size_bits = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits = 16,
	  .size_bits = 48 },
};

#define OPA_PATH_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct sa_path_rec, field), \
	.struct_size_bytes = \
		sizeof_field(struct sa_path_rec, field), \
	.field_name = "sa_path_rec:" #field

static const struct ib_field opa_path_rec_table[] = {
	{ OPA_PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 64 },
	{ OPA_PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ OPA_PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ OPA_PATH_REC_FIELD(opa.dlid),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_PATH_REC_FIELD(opa.slid),
	  .offset_words = 11,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_PATH_REC_FIELD(opa.raw_traffic),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits = 1,
	  .size_bits = 3 },
	{ OPA_PATH_REC_FIELD(flow_label),
	  .offset_words = 12,
	  .offset_bits = 4,
	  .size_bits = 20 },
	{ OPA_PATH_REC_FIELD(hop_limit),
	  .offset_words = 12,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ OPA_PATH_REC_FIELD(traffic_class),
	  .offset_words = 13,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ OPA_PATH_REC_FIELD(reversible),
	  .offset_words = 13,
	  .offset_bits = 8,
	  .size_bits = 1 },
	{ OPA_PATH_REC_FIELD(numb_path),
	  .offset_words = 13,
	  .offset_bits = 9,
	  .size_bits = 7 },
	{ OPA_PATH_REC_FIELD(pkey),
	  .offset_words = 13,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ OPA_PATH_REC_FIELD(opa.l2_8B),
	  .offset_words = 14,
	  .offset_bits = 0,
	  .size_bits = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_10B),
	  .offset_words = 14,
	  .offset_bits = 1,
	  .size_bits = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_9B),
	  .offset_words = 14,
	  .offset_bits = 2,
	  .size_bits = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_16B),
	  .offset_words = 14,
	  .offset_bits = 3,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits = 4,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_type),
	  .offset_words = 14,
	  .offset_bits = 6,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_priority),
	  .offset_words = 14,
	  .offset_bits = 8,
	  .size_bits = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits = 16,
	  .size_bits = 3 },
	{ OPA_PATH_REC_FIELD(sl),
	  .offset_words = 14,
	  .offset_bits = 19,
	  .size_bits = 5 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ OPA_PATH_REC_FIELD(mtu_selector),
	  .offset_words = 15,
	  .offset_bits = 0,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(mtu),
	  .offset_words = 15,
	  .offset_bits = 2,
	  .size_bits = 6 },
	{ OPA_PATH_REC_FIELD(rate_selector),
	  .offset_words = 15,
	  .offset_bits = 8,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(rate),
	  .offset_words = 15,
	  .offset_bits = 10,
	  .size_bits = 6 },
	{ OPA_PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 15,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(packet_life_time),
	  .offset_words = 15,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ OPA_PATH_REC_FIELD(preference),
	  .offset_words = 15,
	  .offset_bits = 24,
	  .size_bits = 8 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes = sizeof_field(struct ib_sa_mcmember_rec, field),	\
	.field_name = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits = 24,
	  .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits = 26,
	  .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits = 0,
	  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits = 4,
	  .size_bits = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits = 4,
	  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits = 8,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits = 9,
	  .size_bits = 23 },
};

#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
	.struct_size_bytes = sizeof_field(struct ib_class_port_info, field),	\
	.field_name = "ib_class_port_info:" #field

static const struct ib_field ib_classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits = 8,
	  .size_bits = 8 },
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	  .offset_words = 0,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 7,
	  .offset_bits = 16,
	  .size_bits = 16 },

	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	  .offset_words = 8,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits = 0,
	  .size_bits = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	  .offset_words = 14,
	  .offset_bits = 0,
	  .size_bits = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 15,
	  .offset_bits = 16,
	  .size_bits = 16 },

	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	  .offset_words = 16,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits = 0,
	  .size_bits = 32 },
};

#define OPA_CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct opa_class_port_info, field), \
	.struct_size_bytes = \
		sizeof_field(struct opa_class_port_info, field), \
	.field_name = "opa_class_port_info:" #field

static const struct ib_field opa_classport_info_rec_table[] = {
	{ OPA_CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits = 8,
	  .size_bits = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
	  .offset_words = 0,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
	  .offset_words = 8,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
	  .offset_words = 14,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
	  .offset_words = 16,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 18,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 18,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
	  .offset_words = 19,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ RESERVED,
	  .offset_words = 19,
	  .offset_bits = 8,
	  .size_bits = 24 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes = sizeof_field(struct ib_sa_guidinfo_rec, field),	\
	.field_name = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits = 16,
	  .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 512 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct sa_service_rec, field),	\
	.struct_size_bytes = sizeof_field(struct sa_service_rec, field),	\
	.field_name = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ RESERVED,
	  .offset_words = 6,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 512 },
	{ SERVICE_REC_FIELD(data_8),
	  .offset_words = 28,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ SERVICE_REC_FIELD(data_16),
	  .offset_words = 32,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ SERVICE_REC_FIELD(data_32),
	  .offset_words = 36,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ SERVICE_REC_FIELD(data_64),
	  .offset_words = 40,
	  .offset_bits = 0,
	  .size_bits = 128 },
};

#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	strscpy_pad(header->device_name,
		    dev_name(&query->port->agent->device->dev),
		    LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_ALL;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	struct ib_sa_mad *mad;
	int len;
	unsigned long flags;
	unsigned long delay;
	gfp_t gfp_flag;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put the nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
		GFP_NOWAIT;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);

	if (ret)
		goto out;

	/* Put the request on the list. */
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);

out:
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct sa_path_rec recs[RDMA_PRIMARY_PATH_MAX_REC_NUM];
	struct ib_sa_path_query *path_query;
	struct ib_path_rec_data *rec_data;
	struct ib_mad_send_wc mad_send_wc;
	const struct nlattr *head, *curr;
	struct ib_sa_mad *mad = NULL;
	int len, rem, status = -EIO;
	unsigned int num_prs = 0;
	u32 mask = 0;

	if (!query->callback)
		goto out;

	path_query = container_of(query, struct ib_sa_path_query, sa_query);
	mad = query->mad_buf->mad;

	head = (const struct nlattr *) nlmsg_data(nlh);
	len = nlmsg_len(nlh);
	switch (query->path_use) {
	case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
		mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
		break;

	case LS_RESOLVE_PATH_USE_ALL:
		mask = IB_PATH_PRIMARY;
		break;

	case LS_RESOLVE_PATH_USE_GMP:
	default:
		mask = IB_PATH_PRIMARY | IB_PATH_GMP |
			IB_PATH_BIDIRECTIONAL;
		break;
	}

	nla_for_each_attr(curr, head, len, rem) {
		if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
			continue;

		rec_data = nla_data(curr);
		if ((rec_data->flags & mask) != mask)
			continue;

		if ((query->flags & IB_SA_QUERY_OPA) ||
		    path_query->conv_pr) {
			mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
			memcpy(mad->data, rec_data->path_rec,
			       sizeof(rec_data->path_rec));
			query->callback(query, 0, mad);
			goto out;
		}

		status = 0;
		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  rec_data->path_rec, &recs[num_prs]);
		recs[num_prs].flags = rec_data->flags;
		recs[num_prs].rec_type = SA_PATH_REC_TYPE_IB;
		sa_path_set_dmac_zero(&recs[num_prs]);

		num_prs++;
		if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
			break;
	}

	if (!status) {
		mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
		path_query->callback(status, recs, num_prs,
				     path_query->context);
	} else
		query->callback(query, status, mad);

out:
	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_policy, NULL);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	spin_lock_irqsave(&ib_nl_request_lock, flags);

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
	}

	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

settimeout_out:
	return 0;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_policy, NULL);
	if (ret)
		return 0;

	return 1;
}

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	unsigned long flags;
	struct ib_sa_query *query = NULL, *iter;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(iter, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == iter->seq) {
			if (!ib_sa_query_cancelled(iter)) {
				list_del(&iter->list);
				query = iter;
			}
			break;
		}
	}

	if (!query) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* If the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return 0;
}

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	rdma_destroy_ah(sm_ah->ah, 0);
	kfree(sm_ah);
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
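
/*
 * Illustrative usage sketch (not from the original source; "my_client" is
 * a hypothetical caller-owned structure):
 *
 *	static struct ib_sa_client my_client;
 *
 *	ib_sa_register_client(&my_client);
 *	... issue SA queries passing &my_client ...
 *	ib_sa_unregister_client(&my_client);	// blocks until all refs drop
 */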

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id: ID of query to cancel
 * @query: query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_send_buf *mad_buf;

	xa_lock_irqsave(&queries, flags);
	if (xa_load(&queries, id) != query) {
		xa_unlock_irqrestore(&queries, flags);
		return;
	}
	mad_buf = query->mad_buf;
	xa_unlock_irqrestore(&queries, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
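
/*
 * Illustrative sketch (hypothetical caller code): the id returned by a
 * query function and the query pointer it filled in are passed together,
 * so a stale id cannot cancel an unrelated query.
 *
 *	id = ib_sa_path_rec_get(..., &query);
 *	if (id >= 0 && want_to_abort)
 *		ib_sa_cancel_query(id, query);	// callback then sees -EINTR
 */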

static u8 get_src_path_mask(struct ib_device *device, u32 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num,
				   struct sa_path_rec *rec,
				   struct rdma_ah_attr *ah_attr,
				   const struct ib_gid_attr *gid_attr)
{
	enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);

	if (!gid_attr) {
		gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
						 port_num, NULL);
		if (IS_ERR(gid_attr))
			return PTR_ERR(gid_attr);
	} else
		rdma_hold_gid_attr(gid_attr);

	rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
				be32_to_cpu(rec->flow_label),
				rec->hop_limit, rec->traffic_class,
				gid_attr);
	return 0;
}

/**
 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
 *   an SA path record.
 * @device: Device associated with the ah attributes initialization.
 * @port_num: Port on the specified device.
 * @rec: path record entry to use for ah attributes initialization.
 * @ah_attr: address handle attributes to initialize from the path record.
 * @gid_attr: SGID attribute to consider during initialization.
 *
 * When ib_init_ah_attr_from_path() returns success,
 * (a) for the IB link layer it optionally contains a reference to an SGID
 * attribute when a GRH is present.
 * (b) for the RoCE link layer it contains a reference to an SGID attribute.
 * The user must invoke rdma_destroy_ah_attr() to release references to SGID
 * attributes which were initialized using ib_init_ah_attr_from_path().
 */
int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
			      struct sa_path_rec *rec,
			      struct rdma_ah_attr *ah_attr,
			      const struct ib_gid_attr *gid_attr)
{
	int ret = 0;

	memset(ah_attr, 0, sizeof(*ah_attr));
	ah_attr->type = rdma_ah_find_type(device, port_num);
	rdma_ah_set_sl(ah_attr, rec->sl);
	rdma_ah_set_port_num(ah_attr, port_num);
	rdma_ah_set_static_rate(ah_attr, rec->rate);

	if (sa_path_is_roce(rec)) {
		ret = roce_resolve_route_from_path(rec, gid_attr);
		if (ret)
			return ret;

		memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
	} else {
		rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
		if (sa_path_is_opa(rec) &&
		    rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
			rdma_ah_set_make_grd(ah_attr, true);

		rdma_ah_set_path_bits(ah_attr,
				      be32_to_cpu(sa_path_get_slid(rec)) &
				      get_src_path_mask(device, port_num));
	}

	if (rec->hop_limit > 0 || sa_path_is_roce(rec))
		ret = init_ah_attr_grh_fields(device, port_num,
					      rec, ah_attr, gid_attr);
	return ret;
}
EXPORT_SYMBOL(ib_init_ah_attr_from_path);
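
/*
 * Illustrative sketch (hypothetical caller code): after a successful
 * return, ah_attr may hold an SGID attribute reference that must be
 * released with rdma_destroy_ah_attr() once the attributes are consumed.
 *
 *	struct rdma_ah_attr ah_attr;
 *
 *	if (!ib_init_ah_attr_from_path(device, port_num, rec, &ah_attr, NULL)) {
 *		ah = rdma_create_ah(pd, &ah_attr, 0);
 *		rdma_destroy_ah_attr(&ah_attr);
 *	}
 */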

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct rdma_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	/*
	 * Always check if sm_ah has valid dlid assigned,
	 * before querying for class port info
	 */
	if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
	    !rdma_is_valid_unicast_lid(&ah_attr)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -EAGAIN;
	}
	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    ((query->flags & IB_SA_QUERY_OPA) ?
					     OPA_MGMT_BASE_VERSION :
					     IB_MGMT_BASE_VERSION));
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
	struct ib_sa_mad *mad = query->mad_buf->mad;
	unsigned long flags;

	memset(mad, 0, sizeof(*mad));

	if (query->flags & IB_SA_QUERY_OPA) {
		mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
	} else {
		mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
	}
	mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
		    gfp_t gfp_mask)
{
	unsigned long flags;
	int ret, id;
	const int nmbr_sa_query_retries = 10;

	xa_lock_irqsave(&queries, flags);
	ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
	xa_unlock_irqrestore(&queries, flags);
	if (ret < 0)
		return ret;

	query->mad_buf->timeout_ms = timeout_ms / nmbr_sa_query_retries;
	query->mad_buf->retries = nmbr_sa_query_retries;
	if (!query->mad_buf->timeout_ms) {
		/* Special case, very small timeout_ms */
		query->mad_buf->timeout_ms = 1;
		query->mad_buf->retries = timeout_ms;
	}
	query->mad_buf->context[0] = query;
	query->id = id;

	if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
	    (!(query->flags & IB_SA_QUERY_OPA))) {
		if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		xa_lock_irqsave(&queries, flags);
		__xa_erase(&queries, id);
		xa_unlock_irqrestore(&queries, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}
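
/*
 * Worked example of the retry split in send_mad(): timeout_ms = 1000
 * yields ten tries of 100 ms each, while timeout_ms = 5 hits the special
 * case and becomes five retries of 1 ms, keeping the total wait close to
 * the caller's budget either way.
 */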

void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);
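
/*
 * Illustrative round-trip sketch (hypothetical caller code; the buffer
 * size assumes the packed attribute fits in IB_MGMT_SA_DATA bytes):
 *
 *	u8 attr[IB_MGMT_SA_DATA] = {};
 *	struct sa_path_rec out = {};
 *
 *	ib_sa_pack_path(&rec, attr);
 *	ib_sa_unpack_path(attr, &out);	// out mirrors rec's wire fields
 */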

void ib_sa_pack_service(struct sa_service_rec *rec, void *attribute)
{
	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table), rec,
		attribute);
}
EXPORT_SYMBOL(ib_sa_pack_service);

void ib_sa_unpack_service(void *attribute, struct sa_service_rec *rec)
{
	ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table), attribute,
		  rec);
}
EXPORT_SYMBOL(ib_sa_unpack_service);

static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
					 struct ib_sa_device *sa_dev,
					 u32 port_num)
{
	struct ib_sa_port *port;
	unsigned long flags;
	bool ret = false;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->classport_lock, flags);
	if (!port->classport_info.valid)
		goto ret;

	if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
		ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
			OPA_CLASS_PORT_INFO_PR_SUPPORT;
ret:
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}

enum opa_pr_supported {
	PR_NOT_SUPPORTED,
	PR_OPA_SUPPORTED,
	PR_IB_SUPPORTED
};

/*
 * opa_pr_query_possible - Check if the current PR query can be an OPA query.
 *
 * Returns PR_NOT_SUPPORTED if a path record query is not
 * possible, PR_OPA_SUPPORTED if an OPA path record query
 * is possible and PR_IB_SUPPORTED if an IB path record
 * query is possible.
 */
static int opa_pr_query_possible(struct ib_sa_client *client,
				 struct ib_sa_device *sa_dev,
				 struct ib_device *device, u32 port_num)
{
	struct ib_port_attr port_attr;

	if (ib_query_port(device, port_num, &port_attr))
		return PR_NOT_SUPPORTED;

	if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num))
		return PR_OPA_SUPPORTED;

	if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		return PR_NOT_SUPPORTED;
	else
		return PR_IB_SUPPORTED;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status, struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);
	struct sa_path_rec rec = {};

	if (!mad) {
		query->callback(status, NULL, 0, query->context);
		return;
	}

	if (sa_query->flags & IB_SA_QUERY_OPA) {
		ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			  mad->data, &rec);
		rec.rec_type = SA_PATH_REC_TYPE_OPA;
		query->callback(status, &rec, 1, query->context);
		return;
	}

	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
		  mad->data, &rec);
	rec.rec_type = SA_PATH_REC_TYPE_IB;
	sa_path_set_dmac_zero(&rec);

	if (query->conv_pr) {
		struct sa_path_rec opa;

		memset(&opa, 0, sizeof(struct sa_path_rec));
		sa_convert_path_ib_to_opa(&opa, &rec);
		query->callback(status, &opa, 1, query->context);
	} else {
		query->callback(status, &rec, 1, query->context);
	}
}

#define IB_SA_DATA_OFFS 56
#define IB_SERVICE_REC_SZ 176

static void ib_unpack_service_rmpp(struct sa_service_rec *rec,
				   struct ib_mad_recv_wc *mad_wc,
				   int num_services)
{
	unsigned int cp_sz, data_i, data_size, rec_i = 0, buf_i = 0;
	struct ib_mad_recv_buf *mad_buf;
	u8 buf[IB_SERVICE_REC_SZ];
	u8 *data;
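	/*
	 * Note (added comment): sizeof() below only inspects the declared
	 * type of the MAD data field; mad_buf is not dereferenced at run
	 * time, so using it before the list iteration assigns it is safe.
	 */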
	data_size = sizeof(((struct ib_sa_mad *) mad_buf->mad)->data);

	list_for_each_entry(mad_buf, &mad_wc->rmpp_list, list) {
		data = ((struct ib_sa_mad *) mad_buf->mad)->data;
		data_i = 0;
		while (data_i < data_size && rec_i < num_services) {
			cp_sz = min(IB_SERVICE_REC_SZ - buf_i,
				    data_size - data_i);
			memcpy(buf + buf_i, data + data_i, cp_sz);
			data_i += cp_sz;
			buf_i += cp_sz;
			if (buf_i == IB_SERVICE_REC_SZ) {
				ib_sa_unpack_service(buf, rec + rec_i);
				buf_i = 0;
				rec_i++;
			}
		}
	}
}

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query, int status,
				       struct ib_mad_recv_wc *mad_wc)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);
	struct sa_service_rec *rec;
	int num_services;

	if (!mad_wc || !mad_wc->recv_buf.mad) {
		query->callback(status, NULL, 0, query->context);
		return;
	}

	num_services = (mad_wc->mad_len - IB_SA_DATA_OFFS) / IB_SERVICE_REC_SZ;
	if (!num_services) {
		query->callback(-ENODATA, NULL, 0, query->context);
		return;
	}

	rec = kmalloc_objs(*rec, num_services);
	if (!rec) {
		query->callback(-ENOMEM, NULL, 0, query->context);
		return;
	}

	ib_unpack_service_rmpp(rec, mad_wc, num_services);
	query->callback(status, rec, num_services, query->context);
	kfree(rec);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	kfree(query->conv_pr);
	kfree(query);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	kfree(query);
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client: SA client
 * @device: device to send query on
 * @port_num: port number to send query on
 * @rec: Path Record to send in query
 * @comp_mask: component mask to send in query
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when query completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u32 port_num,
		       struct sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       unsigned long timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct sa_path_rec *resp,
					unsigned int num_paths, void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	enum opa_pr_supported status;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
	    (rec->rec_type != SA_PATH_REC_TYPE_OPA))
		return -EINVAL;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc_obj(*query, gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
		status = opa_pr_query_possible(client, sa_dev, device, port_num);
		if (status == PR_NOT_SUPPORTED) {
			ret = -EINVAL;
			goto err1;
		} else if (status == PR_OPA_SUPPORTED) {
			query->sa_query.flags |= IB_SA_QUERY_OPA;
		} else {
			query->conv_pr = kmalloc_obj(*query->conv_pr, gfp_mask);
			if (!query->conv_pr) {
				ret = -ENOMEM;
				goto err1;
			}
		}
	}

	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err2;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release = ib_sa_path_rec_release;
	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	if (query->sa_query.flags & IB_SA_QUERY_OPA) {
		ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			rec, mad->data);
	} else if (query->conv_pr) {
		sa_convert_path_opa_to_ib(query->conv_pr, rec);
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			query->conv_pr, mad->data);
	} else {
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			rec, mad->data);
	}

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
						query->conv_pr : rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err3;

	return ret;

err3:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err2:
	kfree(query->conv_pr);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
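
/*
 * Illustrative usage sketch (hypothetical caller code and callback):
 *
 *	static void path_done(int status, struct sa_path_rec *resp,
 *			      unsigned int num_paths, void *context)
 *	{
 *		if (!status)
 *			... resp[0..num_paths - 1] are valid here only ...
 *	}
 *
 *	id = ib_sa_path_rec_get(&my_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				1000, GFP_KERNEL, path_done, ctx, &query);
 *	if (id < 0)
 *		... id is a negative errno ...
 */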
1766
1767 /**
1768 * ib_sa_service_rec_get - Start a Service get query
1769 * @client: SA client
1770 * @device: device to send query on
1771 * @port_num: port number to send query on
1772 * @rec: Service Record to send in query
1773 * @comp_mask: component mask to send in query
1774 * @timeout_ms: time to wait for response
1775 * @gfp_mask: GFP mask to use for internal allocations
1776 * @callback: function called when query completes, times out or is
1777 * canceled
1778 * @context: opaque user context passed to callback
1779 * @sa_query: query context, used to cancel query
1780 *
1781 * Send a Service Record Get query to the SA to look up a path. The
1782 * callback function will be called when the query completes (or
1783 * fails); status is 0 for a successful response, -EINTR if the query
1784 * is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error
1785 * occurred sending the query. The resp parameter of the callback is
1786 * only valid if status is 0.
1787 *
1788 * If the return value of ib_sa_service_rec_get() is negative, it is an
1789 * error code. Otherwise it is a query ID that can be used to cancel
1790 * the query.
1791 */
ib_sa_service_rec_get(struct ib_sa_client * client,struct ib_device * device,u32 port_num,struct sa_service_rec * rec,ib_sa_comp_mask comp_mask,unsigned long timeout_ms,gfp_t gfp_mask,void (* callback)(int status,struct sa_service_rec * resp,unsigned int num_services,void * context),void * context,struct ib_sa_query ** sa_query)1792 int ib_sa_service_rec_get(struct ib_sa_client *client,
1793 struct ib_device *device, u32 port_num,
1794 struct sa_service_rec *rec,
1795 ib_sa_comp_mask comp_mask,
1796 unsigned long timeout_ms, gfp_t gfp_mask,
1797 void (*callback)(int status,
1798 struct sa_service_rec *resp,
1799 unsigned int num_services,
1800 void *context),
1801 void *context, struct ib_sa_query **sa_query)
1802 {
1803 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1804 struct ib_sa_service_query *query;
1805 struct ib_mad_agent *agent;
1806 struct ib_sa_port *port;
1807 struct ib_sa_mad *mad;
1808 int ret;
1809
1810 if (!sa_dev)
1811 return -ENODEV;
1812
1813 port = &sa_dev->port[port_num - sa_dev->start_port];
1814 agent = port->agent;
1815
1816 query = kzalloc_obj(*query, gfp_mask);
1817 if (!query)
1818 return -ENOMEM;
1819
1820 query->sa_query.port = port;
1821
1822 ret = alloc_mad(&query->sa_query, gfp_mask);
1823 if (ret)
1824 goto err1;
1825
1826 ib_sa_client_get(client);
1827 query->sa_query.client = client;
1828 query->callback = callback;
1829 query->context = context;
1830
1831 mad = query->sa_query.mad_buf->mad;
1832 init_mad(&query->sa_query, agent);
1833
1834 query->sa_query.rmpp_callback = callback ? ib_sa_service_rec_callback :
1835 NULL;
1836 query->sa_query.release = ib_sa_service_rec_release;
1837 mad->mad_hdr.method = IB_MGMT_METHOD_GET_TABLE;
1838 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
1839 mad->sa_hdr.comp_mask = comp_mask;
1840
1841 ib_sa_pack_service(rec, mad->data);
1842
1843 *sa_query = &query->sa_query;
1844 query->sa_query.mad_buf->context[1] = rec;
1845
1846 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1847 if (ret < 0)
1848 goto err2;
1849
1850 return ret;
1851
1852 err2:
1853 *sa_query = NULL;
1854 ib_sa_client_put(query->sa_query.client);
1855 free_mad(&query->sa_query);
1856 err1:
1857 kfree(query);
1858 return ret;
1859 }
1860 EXPORT_SYMBOL(ib_sa_service_rec_get);
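
/*
 * Usage sketch (annotation, not driver code): a caller of
 * ib_sa_service_rec_get() matching the prototype above. my_client,
 * my_svc_done and the component mask are hypothetical; a real caller
 * sets comp_mask bits for exactly the fields it filled in @rec:
 *
 *	static void my_svc_done(int status, struct sa_service_rec *resp,
 *				unsigned int num_services, void *context)
 *	{
 *		if (!status)	// resp holds num_services records
 *			pr_info("%u service record(s)\n", num_services);
 *	}
 *
 *	struct ib_sa_query *query;
 *	int id = ib_sa_service_rec_get(&my_client, device, port_num, &rec,
 *				       comp_mask, 3000, GFP_KERNEL,
 *				       my_svc_done, NULL, &query);
 *	if (id < 0)
 *		return id;	// the query was never sent
 */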
1861
1862 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
1863 int status, struct ib_sa_mad *mad)
1864 {
1865 struct ib_sa_mcmember_query *query =
1866 container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
1867
1868 if (mad) {
1869 struct ib_sa_mcmember_rec rec;
1870
1871 ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1872 mad->data, &rec);
1873 query->callback(status, &rec, query->context);
1874 } else
1875 query->callback(status, NULL, query->context);
1876 }
1877
1878 static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
1879 {
1880 kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
1881 }
1882
1883 int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
1884 struct ib_device *device, u32 port_num,
1885 u8 method,
1886 struct ib_sa_mcmember_rec *rec,
1887 ib_sa_comp_mask comp_mask,
1888 unsigned long timeout_ms, gfp_t gfp_mask,
1889 void (*callback)(int status,
1890 struct ib_sa_mcmember_rec *resp,
1891 void *context),
1892 void *context,
1893 struct ib_sa_query **sa_query)
1894 {
1895 struct ib_sa_mcmember_query *query;
1896 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1897 struct ib_sa_port *port;
1898 struct ib_mad_agent *agent;
1899 struct ib_sa_mad *mad;
1900 int ret;
1901
1902 if (!sa_dev)
1903 return -ENODEV;
1904
1905 port = &sa_dev->port[port_num - sa_dev->start_port];
1906 agent = port->agent;
1907
1908 query = kzalloc_obj(*query, gfp_mask);
1909 if (!query)
1910 return -ENOMEM;
1911
1912 query->sa_query.port = port;
1913 ret = alloc_mad(&query->sa_query, gfp_mask);
1914 if (ret)
1915 goto err1;
1916
1917 ib_sa_client_get(client);
1918 query->sa_query.client = client;
1919 query->callback = callback;
1920 query->context = context;
1921
1922 mad = query->sa_query.mad_buf->mad;
1923 init_mad(&query->sa_query, agent);
1924
1925 query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
1926 query->sa_query.release = ib_sa_mcmember_rec_release;
1927 mad->mad_hdr.method = method;
1928 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
1929 mad->sa_hdr.comp_mask = comp_mask;
1930
1931 ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1932 rec, mad->data);
1933
1934 *sa_query = &query->sa_query;
1935
1936 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1937 if (ret < 0)
1938 goto err2;
1939
1940 return ret;
1941
1942 err2:
1943 *sa_query = NULL;
1944 ib_sa_client_put(query->sa_query.client);
1945 free_mad(&query->sa_query);
1946
1947 err1:
1948 kfree(query);
1949 return ret;
1950 }
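
/*
 * Usage sketch (annotation, not driver code): unlike the Get-only path
 * query, ib_sa_mcmember_rec_query() takes the MAD method as a parameter,
 * so the same entry point serves joins and leaves. The names and the
 * mask below are hypothetical:
 *
 *	// join: SET with the MCMemberRecord fields the SA requires
 *	ret = ib_sa_mcmember_rec_query(&my_client, device, port_num,
 *				       IB_MGMT_METHOD_SET, &rec, comp_mask,
 *				       3000, GFP_KERNEL, my_mc_done, ctx,
 *				       &query);
 *
 *	// leave: the same call with IB_SA_METHOD_DELETE
 */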
1951
1952 /* Support GuidInfoRecord */
1953 static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
1954 int status, struct ib_sa_mad *mad)
1955 {
1956 struct ib_sa_guidinfo_query *query =
1957 container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
1958
1959 if (mad) {
1960 struct ib_sa_guidinfo_rec rec;
1961
1962 ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
1963 mad->data, &rec);
1964 query->callback(status, &rec, query->context);
1965 } else
1966 query->callback(status, NULL, query->context);
1967 }
1968
1969 static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
1970 {
1971 kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
1972 }
1973
1974 int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
1975 struct ib_device *device, u32 port_num,
1976 struct ib_sa_guidinfo_rec *rec,
1977 ib_sa_comp_mask comp_mask, u8 method,
1978 unsigned long timeout_ms, gfp_t gfp_mask,
1979 void (*callback)(int status,
1980 struct ib_sa_guidinfo_rec *resp,
1981 void *context),
1982 void *context,
1983 struct ib_sa_query **sa_query)
1984 {
1985 struct ib_sa_guidinfo_query *query;
1986 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1987 struct ib_sa_port *port;
1988 struct ib_mad_agent *agent;
1989 struct ib_sa_mad *mad;
1990 int ret;
1991
1992 if (!sa_dev)
1993 return -ENODEV;
1994
1995 if (method != IB_MGMT_METHOD_GET &&
1996 method != IB_MGMT_METHOD_SET &&
1997 method != IB_SA_METHOD_DELETE) {
1998 return -EINVAL;
1999 }
2000
2001 port = &sa_dev->port[port_num - sa_dev->start_port];
2002 agent = port->agent;
2003
2004 query = kzalloc_obj(*query, gfp_mask);
2005 if (!query)
2006 return -ENOMEM;
2007
2008 query->sa_query.port = port;
2009 ret = alloc_mad(&query->sa_query, gfp_mask);
2010 if (ret)
2011 goto err1;
2012
2013 ib_sa_client_get(client);
2014 query->sa_query.client = client;
2015 query->callback = callback;
2016 query->context = context;
2017
2018 mad = query->sa_query.mad_buf->mad;
2019 init_mad(&query->sa_query, agent);
2020
2021 query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
2022 query->sa_query.release = ib_sa_guidinfo_rec_release;
2023
2024 mad->mad_hdr.method = method;
2025 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
2026 mad->sa_hdr.comp_mask = comp_mask;
2027
2028 ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
2029 mad->data);
2030
2031 *sa_query = &query->sa_query;
2032
2033 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
2034 if (ret < 0)
2035 goto err2;
2036
2037 return ret;
2038
2039 err2:
2040 *sa_query = NULL;
2041 ib_sa_client_put(query->sa_query.client);
2042 free_mad(&query->sa_query);
2043
2044 err1:
2045 kfree(query);
2046 return ret;
2047 }
2048 EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
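
/*
 * Usage sketch (annotation, not driver code): ib_sa_guid_info_rec_query()
 * accepts only IB_MGMT_METHOD_GET, IB_MGMT_METHOD_SET and
 * IB_SA_METHOD_DELETE; any other method fails with -EINVAL before a MAD
 * is allocated. A SET that programs an alias-GUID block might look like
 * this (names and mask bits hypothetical):
 *
 *	ret = ib_sa_guid_info_rec_query(&my_client, device, port_num, &rec,
 *					IB_SA_GUIDINFO_REC_LID |
 *					IB_SA_GUIDINFO_REC_BLOCK_NUM,
 *					IB_MGMT_METHOD_SET, 1000, GFP_KERNEL,
 *					my_guid_done, ctx, &query);
 */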
2049
2050 struct ib_classport_info_context {
2051 struct completion done;
2052 struct ib_sa_query *sa_query;
2053 };
2054
2055 static void ib_classportinfo_cb(void *context)
2056 {
2057 struct ib_classport_info_context *cb_ctx = context;
2058
2059 complete(&cb_ctx->done);
2060 }
2061
2062 static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
2063 int status, struct ib_sa_mad *mad)
2064 {
2065 unsigned long flags;
2066 struct ib_sa_classport_info_query *query =
2067 container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
2068 struct ib_sa_classport_cache *info = &sa_query->port->classport_info;
2069
2070 if (mad) {
2071 if (sa_query->flags & IB_SA_QUERY_OPA) {
2072 struct opa_class_port_info rec;
2073
2074 ib_unpack(opa_classport_info_rec_table,
2075 ARRAY_SIZE(opa_classport_info_rec_table),
2076 mad->data, &rec);
2077
2078 spin_lock_irqsave(&sa_query->port->classport_lock,
2079 flags);
2080 if (!status && !info->valid) {
2081 memcpy(&info->data.opa, &rec,
2082 sizeof(info->data.opa));
2083
2084 info->valid = true;
2085 info->data.type = RDMA_CLASS_PORT_INFO_OPA;
2086 }
2087 spin_unlock_irqrestore(&sa_query->port->classport_lock,
2088 flags);
2089
2090 } else {
2091 struct ib_class_port_info rec;
2092
2093 ib_unpack(ib_classport_info_rec_table,
2094 ARRAY_SIZE(ib_classport_info_rec_table),
2095 mad->data, &rec);
2096
2097 spin_lock_irqsave(&sa_query->port->classport_lock,
2098 flags);
2099 if (!status && !info->valid) {
2100 memcpy(&info->data.ib, &rec,
2101 sizeof(info->data.ib));
2102
2103 info->valid = true;
2104 info->data.type = RDMA_CLASS_PORT_INFO_IB;
2105 }
2106 spin_unlock_irqrestore(&sa_query->port->classport_lock,
2107 flags);
2108 }
2109 }
2110 query->callback(query->context);
2111 }
2112
2113 static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
2114 {
2115 kfree(container_of(sa_query, struct ib_sa_classport_info_query,
2116 sa_query));
2117 }
2118
2119 static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
2120 unsigned long timeout_ms,
2121 void (*callback)(void *context),
2122 void *context,
2123 struct ib_sa_query **sa_query)
2124 {
2125 struct ib_mad_agent *agent;
2126 struct ib_sa_classport_info_query *query;
2127 struct ib_sa_mad *mad;
2128 gfp_t gfp_mask = GFP_KERNEL;
2129 int ret;
2130
2131 agent = port->agent;
2132
2133 query = kzalloc_obj(*query, gfp_mask);
2134 if (!query)
2135 return -ENOMEM;
2136
2137 query->sa_query.port = port;
2138 query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
2139 port->port_num) ?
2140 IB_SA_QUERY_OPA : 0;
2141 ret = alloc_mad(&query->sa_query, gfp_mask);
2142 if (ret)
2143 goto err_free;
2144
2145 query->callback = callback;
2146 query->context = context;
2147
2148 mad = query->sa_query.mad_buf->mad;
2149 init_mad(&query->sa_query, agent);
2150
2151 query->sa_query.callback = ib_sa_classport_info_rec_callback;
2152 query->sa_query.release = ib_sa_classport_info_rec_release;
2153 mad->mad_hdr.method = IB_MGMT_METHOD_GET;
2154 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
2155 mad->sa_hdr.comp_mask = 0;
2156 *sa_query = &query->sa_query;
2157
2158 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
2159 if (ret < 0)
2160 goto err_free_mad;
2161
2162 return ret;
2163
2164 err_free_mad:
2165 *sa_query = NULL;
2166 free_mad(&query->sa_query);
2167
2168 err_free:
2169 kfree(query);
2170 return ret;
2171 }
2172
2173 static void update_ib_cpi(struct work_struct *work)
2174 {
2175 struct ib_sa_port *port =
2176 container_of(work, struct ib_sa_port, ib_cpi_work.work);
2177 struct ib_classport_info_context *cb_context;
2178 unsigned long flags;
2179 int ret;
2180
2181 /* If the classport info is valid, nothing
2182 * to do here.
2183 */
2184 spin_lock_irqsave(&port->classport_lock, flags);
2185 if (port->classport_info.valid) {
2186 spin_unlock_irqrestore(&port->classport_lock, flags);
2187 return;
2188 }
2189 spin_unlock_irqrestore(&port->classport_lock, flags);
2190
2191 	cb_context = kmalloc_obj(*cb_context, GFP_KERNEL);
2192 if (!cb_context)
2193 goto err_nomem;
2194
2195 init_completion(&cb_context->done);
2196
2197 ret = ib_sa_classport_info_rec_query(port, 3000,
2198 ib_classportinfo_cb, cb_context,
2199 &cb_context->sa_query);
2200 if (ret < 0)
2201 goto free_cb_err;
2202 wait_for_completion(&cb_context->done);
2203 free_cb_err:
2204 kfree(cb_context);
2205 spin_lock_irqsave(&port->classport_lock, flags);
2206
2207 	/* If the classport info is still not valid, the query must have
2208 	 * failed for some reason. Retry issuing the query.
2209 	 */
2210 if (!port->classport_info.valid) {
2211 port->classport_info.retry_cnt++;
2212 if (port->classport_info.retry_cnt <=
2213 IB_SA_CPI_MAX_RETRY_CNT) {
2214 unsigned long delay =
2215 msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
2216
2217 queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
2218 }
2219 }
2220 spin_unlock_irqrestore(&port->classport_lock, flags);
2221
2222 err_nomem:
2223 return;
2224 }
2225
2226 static void send_handler(struct ib_mad_agent *agent,
2227 struct ib_mad_send_wc *mad_send_wc)
2228 {
2229 struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
2230 unsigned long flags;
2231 int status = 0;
2232
2233 if (query->callback || query->rmpp_callback) {
2234 switch (mad_send_wc->status) {
2235 case IB_WC_SUCCESS:
2236 /* No callback -- already got recv */
2237 break;
2238 case IB_WC_RESP_TIMEOUT_ERR:
2239 status = -ETIMEDOUT;
2240 break;
2241 case IB_WC_WR_FLUSH_ERR:
2242 status = -EINTR;
2243 break;
2244 default:
2245 status = -EIO;
2246 break;
2247 }
2248
2249 if (status)
2250 query->callback ? query->callback(query, status, NULL) :
2251 query->rmpp_callback(query, status, NULL);
2252 }
2253
2254 xa_lock_irqsave(&queries, flags);
2255 __xa_erase(&queries, query->id);
2256 xa_unlock_irqrestore(&queries, flags);
2257
2258 free_mad(query);
2259 if (query->client)
2260 ib_sa_client_put(query->client);
2261 query->release(query);
2262 }
2263
2264 static void recv_handler(struct ib_mad_agent *mad_agent,
2265 struct ib_mad_send_buf *send_buf,
2266 struct ib_mad_recv_wc *mad_recv_wc)
2267 {
2268 struct ib_sa_query *query;
2269 struct ib_mad *mad;
2270
2271
2272 if (!send_buf)
2273 return;
2274
2275 query = send_buf->context[0];
2276 mad = mad_recv_wc->recv_buf.mad;
2277
2278 if (query->rmpp_callback) {
2279 if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
2280 query->rmpp_callback(query, mad->mad_hdr.status ?
2281 -EINVAL : 0, mad_recv_wc);
2282 else
2283 query->rmpp_callback(query, -EIO, NULL);
2284 } else if (query->callback) {
2285 if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
2286 query->callback(query, mad->mad_hdr.status ?
2287 -EINVAL : 0, (struct ib_sa_mad *)mad);
2288 else
2289 query->callback(query, -EIO, NULL);
2290 }
2291
2292 ib_free_recv_mad(mad_recv_wc);
2293 }
2294
2295 static void update_sm_ah(struct work_struct *work)
2296 {
2297 struct ib_sa_port *port =
2298 container_of(work, struct ib_sa_port, update_task);
2299 struct ib_sa_sm_ah *new_ah;
2300 struct ib_port_attr port_attr;
2301 struct rdma_ah_attr ah_attr;
2302 bool grh_required;
2303
2304 if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
2305 pr_warn("Couldn't query port\n");
2306 return;
2307 }
2308
2309 	new_ah = kmalloc_obj(*new_ah, GFP_KERNEL);
2310 if (!new_ah)
2311 return;
2312
2313 kref_init(&new_ah->ref);
2314 new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
2315
2316 new_ah->pkey_index = 0;
2317 if (ib_find_pkey(port->agent->device, port->port_num,
2318 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
2319 pr_err("Couldn't find index for default PKey\n");
2320
2321 memset(&ah_attr, 0, sizeof(ah_attr));
2322 ah_attr.type = rdma_ah_find_type(port->agent->device,
2323 port->port_num);
2324 rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
2325 rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
2326 rdma_ah_set_port_num(&ah_attr, port->port_num);
2327
2328 grh_required = rdma_is_grh_required(port->agent->device,
2329 port->port_num);
2330
2331 /*
2332 * The OPA sm_lid of 0xFFFF needs special handling so that it can be
2333 * differentiated from a permissive LID of 0xFFFF. We set the
2334 * grh_required flag here so the SA can program the DGID in the
2335 	 * address handle appropriately.
2336 */
2337 if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
2338 (grh_required ||
2339 port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
2340 rdma_ah_set_make_grd(&ah_attr, true);
2341
2342 if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
2343 rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
2344 rdma_ah_set_subnet_prefix(&ah_attr,
2345 cpu_to_be64(port_attr.subnet_prefix));
2346 rdma_ah_set_interface_id(&ah_attr,
2347 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
2348 }
2349
2350 new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr,
2351 RDMA_CREATE_AH_SLEEPABLE);
2352 if (IS_ERR(new_ah->ah)) {
2353 pr_warn("Couldn't create new SM AH\n");
2354 kfree(new_ah);
2355 return;
2356 }
2357
2358 spin_lock_irq(&port->ah_lock);
2359 if (port->sm_ah)
2360 kref_put(&port->sm_ah->ref, free_sm_ah);
2361 port->sm_ah = new_ah;
2362 spin_unlock_irq(&port->ah_lock);
2363 }
2364
2365 static void ib_sa_event(struct ib_event_handler *handler,
2366 struct ib_event *event)
2367 {
2368 if (event->event == IB_EVENT_PORT_ERR ||
2369 event->event == IB_EVENT_PORT_ACTIVE ||
2370 event->event == IB_EVENT_LID_CHANGE ||
2371 event->event == IB_EVENT_PKEY_CHANGE ||
2372 event->event == IB_EVENT_SM_CHANGE ||
2373 event->event == IB_EVENT_CLIENT_REREGISTER) {
2374 unsigned long flags;
2375 struct ib_sa_device *sa_dev =
2376 container_of(handler, typeof(*sa_dev), event_handler);
2377 u32 port_num = event->element.port_num - sa_dev->start_port;
2378 struct ib_sa_port *port = &sa_dev->port[port_num];
2379
2380 if (!rdma_cap_ib_sa(handler->device, port->port_num))
2381 return;
2382
2383 spin_lock_irqsave(&port->ah_lock, flags);
2384 if (port->sm_ah)
2385 kref_put(&port->sm_ah->ref, free_sm_ah);
2386 port->sm_ah = NULL;
2387 spin_unlock_irqrestore(&port->ah_lock, flags);
2388
2389 if (event->event == IB_EVENT_SM_CHANGE ||
2390 event->event == IB_EVENT_CLIENT_REREGISTER ||
2391 event->event == IB_EVENT_LID_CHANGE ||
2392 event->event == IB_EVENT_PORT_ACTIVE) {
2393 unsigned long delay =
2394 msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
2395
2396 spin_lock_irqsave(&port->classport_lock, flags);
2397 port->classport_info.valid = false;
2398 port->classport_info.retry_cnt = 0;
2399 spin_unlock_irqrestore(&port->classport_lock, flags);
2400 queue_delayed_work(ib_wq,
2401 &port->ib_cpi_work, delay);
2402 }
2403 queue_work(ib_wq, &sa_dev->port[port_num].update_task);
2404 }
2405 }
2406
2407 static int ib_sa_add_one(struct ib_device *device)
2408 {
2409 struct ib_sa_device *sa_dev;
2410 int s, e, i;
2411 int count = 0;
2412 int ret;
2413
2414 s = rdma_start_port(device);
2415 e = rdma_end_port(device);
2416
2417 	sa_dev = kzalloc_flex(*sa_dev, port, size_add(size_sub(e, s), 1),
2417 			      GFP_KERNEL);
2418 if (!sa_dev)
2419 return -ENOMEM;
2420
2421 sa_dev->start_port = s;
2422 sa_dev->end_port = e;
2423
2424 for (i = 0; i <= e - s; ++i) {
2425 spin_lock_init(&sa_dev->port[i].ah_lock);
2426 if (!rdma_cap_ib_sa(device, i + 1))
2427 continue;
2428
2429 sa_dev->port[i].sm_ah = NULL;
2430 sa_dev->port[i].port_num = i + s;
2431
2432 spin_lock_init(&sa_dev->port[i].classport_lock);
2433 sa_dev->port[i].classport_info.valid = false;
2434
2435 sa_dev->port[i].agent =
2436 ib_register_mad_agent(device, i + s, IB_QPT_GSI,
2437 NULL, IB_MGMT_RMPP_VERSION,
2438 send_handler, recv_handler,
2439 sa_dev, 0);
2440 if (IS_ERR(sa_dev->port[i].agent)) {
2441 ret = PTR_ERR(sa_dev->port[i].agent);
2442 goto err;
2443 }
2444
2445 INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
2446 INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
2447 update_ib_cpi);
2448
2449 count++;
2450 }
2451
2452 if (!count) {
2453 ret = -EOPNOTSUPP;
2454 goto free;
2455 }
2456
2457 ib_set_client_data(device, &sa_client, sa_dev);
2458
2459 /*
2460 * We register our event handler after everything is set up,
2461 * and then update our cached info after the event handler is
2462 * registered to avoid any problems if a port changes state
2463 * during our initialization.
2464 */
2465
2466 INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
2467 ib_register_event_handler(&sa_dev->event_handler);
2468
2469 for (i = 0; i <= e - s; ++i) {
2470 if (rdma_cap_ib_sa(device, i + 1))
2471 update_sm_ah(&sa_dev->port[i].update_task);
2472 }
2473
2474 return 0;
2475
2476 err:
2477 while (--i >= 0) {
2478 if (rdma_cap_ib_sa(device, i + 1))
2479 ib_unregister_mad_agent(sa_dev->port[i].agent);
2480 }
2481 free:
2482 kfree(sa_dev);
2483 return ret;
2484 }
2485
2486 static void ib_sa_remove_one(struct ib_device *device, void *client_data)
2487 {
2488 struct ib_sa_device *sa_dev = client_data;
2489 int i;
2490
2491 ib_unregister_event_handler(&sa_dev->event_handler);
2492 flush_workqueue(ib_wq);
2493
2494 for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
2495 if (rdma_cap_ib_sa(device, i + 1)) {
2496 cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
2497 ib_unregister_mad_agent(sa_dev->port[i].agent);
2498 if (sa_dev->port[i].sm_ah)
2499 kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
2500 }
2501
2502 }
2503
2504 kfree(sa_dev);
2505 }
2506
2507 int ib_sa_init(void)
2508 {
2509 int ret;
2510
2511 get_random_bytes(&tid, sizeof tid);
2512
2513 atomic_set(&ib_nl_sa_request_seq, 0);
2514
2515 ret = ib_register_client(&sa_client);
2516 if (ret) {
2517 pr_err("Couldn't register ib_sa client\n");
2518 goto err1;
2519 }
2520
2521 ret = mcast_init();
2522 if (ret) {
2523 pr_err("Couldn't initialize multicast handling\n");
2524 goto err2;
2525 }
2526
2527 ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
2528 if (!ib_nl_wq) {
2529 ret = -ENOMEM;
2530 goto err3;
2531 }
2532
2533 INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);
2534
2535 return 0;
2536
2537 err3:
2538 mcast_cleanup();
2539 err2:
2540 ib_unregister_client(&sa_client);
2541 err1:
2542 return ret;
2543 }
2544
2545 void ib_sa_cleanup(void)
2546 {
2547 cancel_delayed_work(&ib_nl_timed_work);
2548 destroy_workqueue(ib_nl_wq);
2549 mcast_cleanup();
2550 ib_unregister_client(&sa_client);
2551 WARN_ON(!xa_empty(&queries));
2552 }
2553