/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <linux/etherdevice.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include "sa.h"
#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN     100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX     200000

struct ib_sa_sm_ah {
        struct ib_ah *ah;
        struct kref ref;
        u16 pkey_index;
        u8 src_path_mask;
};

struct ib_sa_classport_cache {
        bool valid;
        struct ib_class_port_info data;
};

struct ib_sa_port {
        struct ib_mad_agent *agent;
        struct ib_sa_sm_ah *sm_ah;
        struct work_struct update_task;
        struct ib_sa_classport_cache classport_info;
        spinlock_t classport_lock; /* protects class port info set */
        spinlock_t ah_lock;
        u8 port_num;
};

struct ib_sa_device {
        int start_port, end_port;
        struct ib_event_handler event_handler;
        struct ib_sa_port port[0];
};

struct ib_sa_query {
        void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
        void (*release)(struct ib_sa_query *);
        struct ib_sa_client *client;
        struct ib_sa_port *port;
        struct ib_mad_send_buf *mad_buf;
        struct ib_sa_sm_ah *sm_ah;
        int id;
        u32 flags;
        struct list_head list; /* Local svc request list */
        u32 seq; /* Local svc request sequence number */
        unsigned long timeout; /* Local svc timeout */
        u8 path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001
#define IB_SA_CANCEL               0x00000002

struct ib_sa_service_query {
        void (*callback)(int, struct ib_sa_service_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
        void (*callback)(int, struct ib_sa_path_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
        void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
        void (*callback)(int, struct ib_class_port_info *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
        void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
        .name   = "sa",
        .add    = ib_sa_add_one,
        .remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_path_rec, field), \
        .struct_size_bytes = sizeof ((struct ib_sa_path_rec *) 0)->field, \
        .field_name = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
        { PATH_REC_FIELD(service_id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
        { PATH_REC_FIELD(dgid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { PATH_REC_FIELD(sgid),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { PATH_REC_FIELD(dlid),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { PATH_REC_FIELD(slid),
          .offset_words = 10,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { PATH_REC_FIELD(raw_traffic),
          .offset_words = 11,
          .offset_bits  = 0,
          .size_bits    = 1 },
        { RESERVED,
          .offset_words = 11,
          .offset_bits  = 1,
          .size_bits    = 3 },
        { PATH_REC_FIELD(flow_label),
          .offset_words = 11,
          .offset_bits  = 4,
          .size_bits    = 20 },
        { PATH_REC_FIELD(hop_limit),
          .offset_words = 11,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { PATH_REC_FIELD(traffic_class),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 8 },
        { PATH_REC_FIELD(reversible),
          .offset_words = 12,
          .offset_bits  = 8,
          .size_bits    = 1 },
        { PATH_REC_FIELD(numb_path),
          .offset_words = 12,
          .offset_bits  = 9,
          .size_bits    = 7 },
        { PATH_REC_FIELD(pkey),
          .offset_words = 12,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { PATH_REC_FIELD(qos_class),
          .offset_words = 13,
          .offset_bits  = 0,
          .size_bits    = 12 },
        { PATH_REC_FIELD(sl),
          .offset_words = 13,
          .offset_bits  = 12,
          .size_bits    = 4 },
        { PATH_REC_FIELD(mtu_selector),
          .offset_words = 13,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { PATH_REC_FIELD(mtu),
          .offset_words = 13,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { PATH_REC_FIELD(rate_selector),
          .offset_words = 13,
          .offset_bits  = 24,
          .size_bits    = 2 },
        { PATH_REC_FIELD(rate),
          .offset_words = 13,
          .offset_bits  = 26,
          .size_bits    = 6 },
        { PATH_REC_FIELD(packet_life_time_selector),
          .offset_words = 14,
          .offset_bits  = 0,
          .size_bits    = 2 },
        { PATH_REC_FIELD(packet_life_time),
          .offset_words = 14,
          .offset_bits  = 2,
          .size_bits    = 6 },
        { PATH_REC_FIELD(preference),
          .offset_words = 14,
          .offset_bits  = 8,
          .size_bits    = 8 },
        { RESERVED,
          .offset_words = 14,
          .offset_bits  = 16,
          .size_bits    = 48 },
};
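
/*
 * The tables above drive the generic ib_pack()/ib_unpack() marshalling:
 * each entry maps a struct member to the 32-bit word and bit offset it
 * occupies in the big-endian wire image of the SA attribute. A minimal
 * sketch of packing a path record (the field values are made up purely
 * for illustration):
 *
 *      struct ib_sa_path_rec rec = { .numb_path = 1 };
 *      u8 wire[IB_MGMT_SA_DATA];
 *
 *      ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), &rec, wire);
 *
 * After the call, wire[] carries service_id at word 0, dgid at word 2,
 * and so on, per the offsets listed in path_rec_table.
 */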

#define MCMEMBER_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \
        .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
        .field_name = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
        { MCMEMBER_REC_FIELD(mgid),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { MCMEMBER_REC_FIELD(port_gid),
          .offset_words = 4,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { MCMEMBER_REC_FIELD(qkey),
          .offset_words = 8,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { MCMEMBER_REC_FIELD(mlid),
          .offset_words = 9,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { MCMEMBER_REC_FIELD(mtu_selector),
          .offset_words = 9,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(mtu),
          .offset_words = 9,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(traffic_class),
          .offset_words = 9,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { MCMEMBER_REC_FIELD(pkey),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { MCMEMBER_REC_FIELD(rate_selector),
          .offset_words = 10,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(rate),
          .offset_words = 10,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(packet_life_time_selector),
          .offset_words = 10,
          .offset_bits  = 24,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(packet_life_time),
          .offset_words = 10,
          .offset_bits  = 26,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(sl),
          .offset_words = 11,
          .offset_bits  = 0,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(flow_label),
          .offset_words = 11,
          .offset_bits  = 4,
          .size_bits    = 20 },
        { MCMEMBER_REC_FIELD(hop_limit),
          .offset_words = 11,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { MCMEMBER_REC_FIELD(scope),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(join_state),
          .offset_words = 12,
          .offset_bits  = 4,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(proxy_join),
          .offset_words = 12,
          .offset_bits  = 8,
          .size_bits    = 1 },
        { RESERVED,
          .offset_words = 12,
          .offset_bits  = 9,
          .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
        .struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \
        .field_name = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
        { SERVICE_REC_FIELD(id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
        { SERVICE_REC_FIELD(gid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { SERVICE_REC_FIELD(pkey),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { SERVICE_REC_FIELD(lease),
          .offset_words = 7,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { SERVICE_REC_FIELD(key),
          .offset_words = 8,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { SERVICE_REC_FIELD(name),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 64*8 },
        { SERVICE_REC_FIELD(data8),
          .offset_words = 28,
          .offset_bits  = 0,
          .size_bits    = 16*8 },
        { SERVICE_REC_FIELD(data16),
          .offset_words = 32,
          .offset_bits  = 0,
          .size_bits    = 8*16 },
        { SERVICE_REC_FIELD(data32),
          .offset_words = 36,
          .offset_bits  = 0,
          .size_bits    = 4*32 },
        { SERVICE_REC_FIELD(data64),
          .offset_words = 40,
          .offset_bits  = 0,
          .size_bits    = 2*64 },
};

#define CLASSPORTINFO_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
        .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \
        .field_name = "ib_class_port_info:" #field

static const struct ib_field classport_info_rec_table[] = {
        { CLASSPORTINFO_REC_FIELD(base_version),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 8 },
        { CLASSPORTINFO_REC_FIELD(class_version),
          .offset_words = 0,
          .offset_bits  = 8,
          .size_bits    = 8 },
        { CLASSPORTINFO_REC_FIELD(capability_mask),
          .offset_words = 0,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
          .offset_words = 1,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(redirect_gid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(redirect_lid),
          .offset_words = 7,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { CLASSPORTINFO_REC_FIELD(redirect_pkey),
          .offset_words = 7,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { CLASSPORTINFO_REC_FIELD(redirect_qp),
          .offset_words = 8,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(redirect_qkey),
          .offset_words = 9,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(trap_gid),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { CLASSPORTINFO_REC_FIELD(trap_tcslfl),
          .offset_words = 14,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(trap_lid),
          .offset_words = 15,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { CLASSPORTINFO_REC_FIELD(trap_pkey),
          .offset_words = 15,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { CLASSPORTINFO_REC_FIELD(trap_hlqp),
          .offset_words = 16,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { CLASSPORTINFO_REC_FIELD(trap_qkey),
          .offset_words = 17,
          .offset_bits  = 0,
          .size_bits    = 32 },
};

#define GUIDINFO_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
        .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
        .field_name = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
        { GUIDINFO_REC_FIELD(lid),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { GUIDINFO_REC_FIELD(block_num),
          .offset_words = 0,
          .offset_bits  = 16,
          .size_bits    = 8 },
        { GUIDINFO_REC_FIELD(res1),
          .offset_words = 0,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { GUIDINFO_REC_FIELD(res2),
          .offset_words = 1,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { GUIDINFO_REC_FIELD(guid_info_list),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 512 },
};

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
        query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static void free_sm_ah(struct kref *kref)
{
        struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

        ib_destroy_ah(sm_ah->ah, 0);
        kfree(sm_ah);
}

static void update_sm_ah(struct work_struct *work)
{
        struct ib_sa_port *port =
                container_of(work, struct ib_sa_port, update_task);
        struct ib_sa_sm_ah *new_ah;
        struct ib_port_attr port_attr;
        struct ib_ah_attr ah_attr;

        if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
                pr_warn("Couldn't query port\n");
                return;
        }

        new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
        if (!new_ah)
                return;

        kref_init(&new_ah->ref);
        new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

        new_ah->pkey_index = 0;
        if (ib_find_pkey(port->agent->device, port->port_num,
                         IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
                pr_err("Couldn't find index for default PKey\n");

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid = port_attr.sm_lid;
        ah_attr.sl = port_attr.sm_sl;
        ah_attr.port_num = port->port_num;
        if (port_attr.grh_required) {
                ah_attr.ah_flags = IB_AH_GRH;
                ah_attr.grh.dgid.global.subnet_prefix =
                        cpu_to_be64(port_attr.subnet_prefix);
                ah_attr.grh.dgid.global.interface_id =
                        cpu_to_be64(IB_SA_WELL_KNOWN_GUID);
        }

        new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr,
                                  RDMA_CREATE_AH_SLEEPABLE);
        if (IS_ERR(new_ah->ah)) {
                pr_warn("Couldn't create new SM AH\n");
                kfree(new_ah);
                return;
        }

        spin_lock_irq(&port->ah_lock);
        if (port->sm_ah)
                kref_put(&port->sm_ah->ref, free_sm_ah);
        port->sm_ah = new_ah;
        spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler,
                        struct ib_event *event)
{
        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER) {
                unsigned long flags;
                struct ib_sa_device *sa_dev =
                        container_of(handler, typeof(*sa_dev), event_handler);
                struct ib_sa_port *port =
                        &sa_dev->port[event->element.port_num - sa_dev->start_port];

                if (!rdma_cap_ib_sa(handler->device, port->port_num))
                        return;

                spin_lock_irqsave(&port->ah_lock, flags);
                if (port->sm_ah)
                        kref_put(&port->sm_ah->ref, free_sm_ah);
                port->sm_ah = NULL;
                spin_unlock_irqrestore(&port->ah_lock, flags);

                if (event->event == IB_EVENT_SM_CHANGE ||
                    event->event == IB_EVENT_CLIENT_REREGISTER ||
                    event->event == IB_EVENT_LID_CHANGE) {
                        spin_lock_irqsave(&port->classport_lock, flags);
                        port->classport_info.valid = false;
                        spin_unlock_irqrestore(&port->classport_lock, flags);
                }
                queue_work(ib_wq, &port->update_task);
        }
}

void ib_sa_register_client(struct ib_sa_client *client)
{
        atomic_set(&client->users, 1);
        init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
        ib_sa_client_put(client);
        wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
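
/*
 * A minimal usage sketch for the client API above; the my_* names are
 * hypothetical and only illustrate the expected call order:
 *
 *      static struct ib_sa_client my_sa_client;
 *
 *      ib_sa_register_client(&my_sa_client);
 *      ... issue queries that pass &my_sa_client ...
 *      ib_sa_unregister_client(&my_sa_client);
 *
 * Unregistering drops the initial reference and then blocks on the
 * completion until every outstanding query holding a reference has
 * finished, so calling it from a query callback would deadlock.
 */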

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id: ID of query to cancel
 * @query: query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
        unsigned long flags;
        struct ib_mad_agent *agent;
        struct ib_mad_send_buf *mad_buf;

        spin_lock_irqsave(&idr_lock, flags);
        if (idr_find(&query_idr, id) != query) {
                spin_unlock_irqrestore(&idr_lock, flags);
                return;
        }
        agent = query->port->agent;
        mad_buf = query->mad_buf;
        spin_unlock_irqrestore(&idr_lock, flags);

        ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
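
/*
 * Cancellation is keyed on the (id, query) pair returned through the
 * *_query()/*_get() entry points; a stale pair is ignored. A sketch
 * (my_callback and my_ctx are hypothetical):
 *
 *      struct ib_sa_query *sa_query;
 *      int id;
 *
 *      id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *                              comp_mask, 1000, GFP_KERNEL,
 *                              my_callback, my_ctx, &sa_query);
 *      if (id >= 0)
 *              ib_sa_cancel_query(id, sa_query);
 *
 * The canceled query still completes through my_callback, with status
 * -EINTR.
 */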

static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
        struct ib_sa_device *sa_dev;
        struct ib_sa_port *port;
        unsigned long flags;
        u8 src_path_mask;

        sa_dev = ib_get_client_data(device, &sa_client);
        if (!sa_dev)
                return 0x7f;

        port = &sa_dev->port[port_num - sa_dev->start_port];
        spin_lock_irqsave(&port->ah_lock, flags);
        src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
        spin_unlock_irqrestore(&port->ah_lock, flags);

        return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
                         struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
        int ret;
        u16 gid_index;
        int use_roce;
        if_t ndev = NULL;

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->dlid = be16_to_cpu(rec->dlid);
        ah_attr->sl = rec->sl;
        ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
                                 get_src_path_mask(device, port_num);
        ah_attr->port_num = port_num;
        ah_attr->static_rate = rec->rate;

        use_roce = rdma_cap_eth_ah(device, port_num);

        if (use_roce) {
                if_t idev;
                if_t resolved_dev;
                struct rdma_dev_addr dev_addr = {
                        .bound_dev_if = rec->ifindex,
                        .net = rec->net ? rec->net : &init_net
                };
                union rdma_sockaddr sgid_addr, dgid_addr;

                if (!device->get_netdev)
                        return -EOPNOTSUPP;

                rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
                rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);

                /* validate the route */
                ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
                                            &dgid_addr._sockaddr, &dev_addr);
                if (ret)
                        return ret;

                if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
                     dev_addr.network == RDMA_NETWORK_IPV6) &&
                    rec->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
                        return -EINVAL;

                idev = device->get_netdev(device, port_num);
                if (!idev)
                        return -ENODEV;

                resolved_dev = dev_get_by_index(dev_addr.net,
                                                dev_addr.bound_dev_if);
                if (!resolved_dev) {
                        dev_put(idev);
                        return -ENODEV;
                }
                ndev = ib_get_ndev_from_path(rec);
                if ((ndev && ndev != resolved_dev) ||
                    (resolved_dev != idev &&
                     rdma_vlan_dev_real_dev(resolved_dev) != idev))
                        ret = -EHOSTUNREACH;
                dev_put(idev);
                dev_put(resolved_dev);
                if (ret) {
                        if (ndev)
                                dev_put(ndev);
                        return ret;
                }
        }

        if (rec->hop_limit > 0 || use_roce) {
                ah_attr->ah_flags = IB_AH_GRH;
                ah_attr->grh.dgid = rec->dgid;

                ret = ib_find_cached_gid_by_port(device, &rec->sgid,
                                                 rec->gid_type, port_num, ndev,
                                                 &gid_index);
                if (ret) {
                        if (ndev)
                                dev_put(ndev);
                        return ret;
                }

                ah_attr->grh.sgid_index = gid_index;
                ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
                ah_attr->grh.hop_limit = rec->hop_limit;
                ah_attr->grh.traffic_class = rec->traffic_class;
                if (ndev)
                        dev_put(ndev);
        }

        if (use_roce)
                memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);

        return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);
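
/*
 * Illustrative follow-on (a sketch, not code from this file): a path
 * record delivered to a query callback is typically converted to AH
 * attributes with ib_init_ah_from_path() and then into an address
 * handle. The pd, device, port_num, and resp names below are assumed
 * to be in scope at the call site.
 *
 *      struct ib_ah_attr ah_attr;
 *      struct ib_ah *ah;
 *
 *      if (!ib_init_ah_from_path(device, port_num, resp, &ah_attr)) {
 *              ah = ib_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *              if (IS_ERR(ah))
 *                      ... handle the error ...
 *      }
 */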

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
        unsigned long flags;

        spin_lock_irqsave(&query->port->ah_lock, flags);
        if (!query->port->sm_ah) {
                spin_unlock_irqrestore(&query->port->ah_lock, flags);
                return -EAGAIN;
        }
        kref_get(&query->port->sm_ah->ref);
        query->sm_ah = query->port->sm_ah;
        spin_unlock_irqrestore(&query->port->ah_lock, flags);

        query->mad_buf = ib_create_send_mad(query->port->agent, 1,
                                            query->sm_ah->pkey_index,
                                            0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
                                            gfp_mask,
                                            IB_MGMT_BASE_VERSION);
        if (IS_ERR(query->mad_buf)) {
                kref_put(&query->sm_ah->ref, free_sm_ah);
                return -ENOMEM;
        }

        query->mad_buf->ah = query->sm_ah->ah;

        return 0;
}

static void free_mad(struct ib_sa_query *query)
{
        ib_free_send_mad(query->mad_buf);
        kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
        unsigned long flags;

        memset(mad, 0, sizeof *mad);

        mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
        mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
        mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

        spin_lock_irqsave(&tid_lock, flags);
        mad->mad_hdr.tid =
                cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
        spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
        bool preload = gfpflags_allow_blocking(gfp_mask);
        unsigned long flags;
        int ret, id;

        if (preload)
                idr_preload(gfp_mask);
        spin_lock_irqsave(&idr_lock, flags);

        id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

        spin_unlock_irqrestore(&idr_lock, flags);
        if (preload)
                idr_preload_end();
        if (id < 0)
                return id;

        query->mad_buf->timeout_ms = timeout_ms;
        query->mad_buf->context[0] = query;
        query->id = id;

        if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE)
                ib_sa_disable_local_svc(query);

        ret = ib_post_send_mad(query->mad_buf, NULL);
        if (ret) {
                spin_lock_irqsave(&idr_lock, flags);
                idr_remove(&query_idr, id);
                spin_unlock_irqrestore(&idr_lock, flags);
        }

        /*
         * It's not safe to dereference query any more, because the
         * send may already have completed and freed the query in
         * another context.
         */
        return ret ? ret : id;
}
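
/*
 * Note on the pattern in send_mad() above: idr_alloc() runs under
 * idr_lock with GFP_NOWAIT because a spinlock is held; when the
 * caller's gfp_mask allows blocking, idr_preload()/idr_preload_end()
 * bracket the locked section so the allocation can still draw from a
 * preloaded per-CPU cache instead of failing under memory pressure.
 */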

void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
        ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
        ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
                                    int status,
                                    struct ib_sa_mad *mad)
{
        struct ib_sa_path_query *query =
                container_of(sa_query, struct ib_sa_path_query, sa_query);

        if (mad) {
                struct ib_sa_path_rec rec;

                ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
                          mad->data, &rec);
                rec.net = NULL;
                rec.ifindex = 0;
                rec.gid_type = IB_GID_TYPE_IB;
                eth_zero_addr(rec.dmac);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client: SA client
 * @device: device to send query on
 * @port_num: port number to send query on
 * @rec: Path Record to send in query
 * @comp_mask: component mask to send in query
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when query completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
                       struct ib_device *device, u8 port_num,
                       struct ib_sa_path_rec *rec,
                       ib_sa_comp_mask comp_mask,
                       int timeout_ms, gfp_t gfp_mask,
                       void (*callback)(int status,
                                        struct ib_sa_path_rec *resp,
                                        void *context),
                       void *context,
                       struct ib_sa_query **sa_query)
{
        struct ib_sa_path_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback = callback;
        query->context = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
        query->sa_query.release = ib_sa_path_rec_release;
        mad->mad_hdr.method = IB_MGMT_METHOD_GET;
        mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
        mad->sa_hdr.comp_mask = comp_mask;

        ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

        *sa_query = &query->sa_query;

        query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
        query->sa_query.mad_buf->context[1] = rec;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
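
/*
 * End-to-end sketch for ib_sa_path_rec_get(); the my_* names are
 * hypothetical. The call is asynchronous: it returns a query ID
 * immediately and the callback runs later from the MAD layer.
 *
 *      static void my_path_callback(int status, struct ib_sa_path_rec *resp,
 *                                   void *context)
 *      {
 *              if (!status)
 *                      pr_info("path resolved, dlid 0x%x\n",
 *                              be16_to_cpu(resp->dlid));
 *      }
 *
 *      struct ib_sa_path_rec rec = {
 *              .sgid = my_sgid,
 *              .dgid = my_dgid,
 *              .numb_path = 1,
 *      };
 *      struct ib_sa_query *sa_query;
 *      int id;
 *
 *      id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *                              IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_DGID |
 *                              IB_SA_PATH_REC_NUMB_PATH,
 *                              1000, GFP_KERNEL,
 *                              my_path_callback, NULL, &sa_query);
 */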

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
                                       int status,
                                       struct ib_sa_mad *mad)
{
        struct ib_sa_service_query *query =
                container_of(sa_query, struct ib_sa_service_query, sa_query);

        if (mad) {
                struct ib_sa_service_rec rec;

                ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client: SA client
 * @device: device to send request on
 * @port_num: port number to send request on
 * @method: SA method - should be get, set, or delete
 * @rec: Service Record to send in request
 * @comp_mask: component mask to send in request
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when request completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code. Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
                            struct ib_device *device, u8 port_num, u8 method,
                            struct ib_sa_service_rec *rec,
                            ib_sa_comp_mask comp_mask,
                            int timeout_ms, gfp_t gfp_mask,
                            void (*callback)(int status,
                                             struct ib_sa_service_rec *resp,
                                             void *context),
                            void *context,
                            struct ib_sa_query **sa_query)
{
        struct ib_sa_service_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        if (method != IB_MGMT_METHOD_GET &&
            method != IB_MGMT_METHOD_SET &&
            method != IB_SA_METHOD_DELETE)
                return -EINVAL;

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback = callback;
        query->context = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
        query->sa_query.release = ib_sa_service_rec_release;
        mad->mad_hdr.method = method;
        mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
        mad->sa_hdr.comp_mask = comp_mask;

        ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
                rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
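
/*
 * Registration sketch (names and values are illustrative, not taken
 * from this file; the component mask bits are assumed to match the
 * IB_SA_SERVICE_REC_* definitions in rdma/ib_sa.h): fill in the
 * record, select the matching mask bits, and use IB_MGMT_METHOD_SET to
 * register the service or IB_SA_METHOD_DELETE to remove it.
 *
 *      struct ib_sa_service_rec rec = { .id = MY_SERVICE_ID };
 *
 *      strncpy((char *)rec.name, "my-service", sizeof(rec.name));
 *      ret = ib_sa_service_rec_query(&my_sa_client, device, port_num,
 *                                    IB_MGMT_METHOD_SET, &rec,
 *                                    IB_SA_SERVICE_REC_SERVICE_ID |
 *                                    IB_SA_SERVICE_REC_SERVICE_NAME,
 *                                    1000, GFP_KERNEL,
 *                                    my_service_callback, NULL, &sa_query);
 */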

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
                                        int status,
                                        struct ib_sa_mad *mad)
{
        struct ib_sa_mcmember_query *query =
                container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

        if (mad) {
                struct ib_sa_mcmember_rec rec;

                ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
                             struct ib_device *device, u8 port_num,
                             u8 method,
                             struct ib_sa_mcmember_rec *rec,
                             ib_sa_comp_mask comp_mask,
                             int timeout_ms, gfp_t gfp_mask,
                             void (*callback)(int status,
                                              struct ib_sa_mcmember_rec *resp,
                                              void *context),
                             void *context,
                             struct ib_sa_query **sa_query)
{
        struct ib_sa_mcmember_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback = callback;
        query->context = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
        query->sa_query.release = ib_sa_mcmember_rec_release;
        mad->mad_hdr.method = method;
        mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
        mad->sa_hdr.comp_mask = comp_mask;

        ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
                rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
                                        int status,
                                        struct ib_sa_mad *mad)
{
        struct ib_sa_guidinfo_query *query =
                container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

        if (mad) {
                struct ib_sa_guidinfo_rec rec;

                ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
                              struct ib_device *device, u8 port_num,
                              struct ib_sa_guidinfo_rec *rec,
                              ib_sa_comp_mask comp_mask, u8 method,
                              int timeout_ms, gfp_t gfp_mask,
                              void (*callback)(int status,
                                               struct ib_sa_guidinfo_rec *resp,
                                               void *context),
                              void *context,
                              struct ib_sa_query **sa_query)
{
        struct ib_sa_guidinfo_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        if (method != IB_MGMT_METHOD_GET &&
            method != IB_MGMT_METHOD_SET &&
            method != IB_SA_METHOD_DELETE)
                return -EINVAL;

        port = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback = callback;
        query->context = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
        query->sa_query.release = ib_sa_guidinfo_rec_release;

        mad->mad_hdr.method = method;
        mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
        mad->sa_hdr.comp_mask = comp_mask;

        ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
                mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

/* Support get SA ClassPortInfo */
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
                                              int status,
                                              struct ib_sa_mad *mad)
{
        unsigned long flags;
        struct ib_sa_classport_info_query *query =
                container_of(sa_query, struct ib_sa_classport_info_query, sa_query);

        if (mad) {
                struct ib_class_port_info rec;

                ib_unpack(classport_info_rec_table,
                          ARRAY_SIZE(classport_info_rec_table),
                          mad->data, &rec);

                spin_lock_irqsave(&sa_query->port->classport_lock, flags);
                if (!status && !sa_query->port->classport_info.valid) {
                        memcpy(&sa_query->port->classport_info.data, &rec,
                               sizeof(sa_query->port->classport_info.data));

                        sa_query->port->classport_info.valid = true;
                }
                spin_unlock_irqrestore(&sa_query->port->classport_lock, flags);

                query->callback(status, &rec, query->context);
        } else {
                query->callback(status, NULL, query->context);
        }
}

static void ib_sa_portclass_info_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_classport_info_query,
                           sa_query));
}

int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
                                   struct ib_device *device, u8 port_num,
                                   int timeout_ms, gfp_t gfp_mask,
                                   void (*callback)(int status,
                                                    struct ib_class_port_info *resp,
                                                    void *context),
                                   void *context,
                                   struct ib_sa_query **sa_query)
{
        struct ib_sa_classport_info_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        struct ib_class_port_info cached_class_port_info;
        int ret;
        unsigned long flags;

        if (!sa_dev)
                return -ENODEV;

        port = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        /* Use cached ClassPortInfo attribute if valid instead of sending mad */
        spin_lock_irqsave(&port->classport_lock, flags);
        if (port->classport_info.valid && callback) {
                memcpy(&cached_class_port_info, &port->classport_info.data,
                       sizeof(cached_class_port_info));
                spin_unlock_irqrestore(&port->classport_lock, flags);
                callback(0, &cached_class_port_info, context);
                return 0;
        }
        spin_unlock_irqrestore(&port->classport_lock, flags);

        query = kzalloc(sizeof(*query), gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback = callback;
        query->context = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_classport_info_rec_callback : NULL;
        query->sa_query.release = ib_sa_portclass_info_rec_release;
        /* support GET only */
        mad->mad_hdr.method = IB_MGMT_METHOD_GET;
        mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
        mad->sa_hdr.comp_mask = 0;
        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_classport_info_rec_query);
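
/*
 * ClassPortInfo results are cached per port: the first successful
 * response is stored in port->classport_info, and later queries with a
 * callback are answered from the cache without sending a MAD, until an
 * SM change, LID change, or client reregister event invalidates the
 * entry in ib_sa_event(). A cached hit invokes the callback with
 * status 0 synchronously, before ib_sa_classport_info_rec_query()
 * returns.
 */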

static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
        unsigned long flags;

        if (query->callback)
                switch (mad_send_wc->status) {
                case IB_WC_SUCCESS:
                        /* No callback -- already got recv */
                        break;
                case IB_WC_RESP_TIMEOUT_ERR:
                        query->callback(query, -ETIMEDOUT, NULL);
                        break;
                case IB_WC_WR_FLUSH_ERR:
                        query->callback(query, -EINTR, NULL);
                        break;
                default:
                        query->callback(query, -EIO, NULL);
                        break;
                }

        spin_lock_irqsave(&idr_lock, flags);
        idr_remove(&query_idr, query->id);
        spin_unlock_irqrestore(&idr_lock, flags);

        free_mad(query);
        ib_sa_client_put(query->client);
        query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
                         struct ib_mad_send_buf *send_buf,
                         struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_sa_query *query;

        if (!send_buf)
                return;

        query = send_buf->context[0];
        if (query->callback) {
                if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
                        query->callback(query,
                                        mad_recv_wc->recv_buf.mad->mad_hdr.status ?
                                        -EINVAL : 0,
                                        (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
                else
                        query->callback(query, -EIO, NULL);
        }

        ib_free_recv_mad(mad_recv_wc);
}

static void ib_sa_add_one(struct ib_device *device)
{
        struct ib_sa_device *sa_dev;
        int s, e, i;
        int count = 0;

        s = rdma_start_port(device);
        e = rdma_end_port(device);

        sa_dev = kzalloc(sizeof *sa_dev +
                         (e - s + 1) * sizeof (struct ib_sa_port),
                         GFP_KERNEL);
        if (!sa_dev)
                return;

        sa_dev->start_port = s;
        sa_dev->end_port = e;

        for (i = 0; i <= e - s; ++i) {
                spin_lock_init(&sa_dev->port[i].ah_lock);
                if (!rdma_cap_ib_sa(device, i + 1))
                        continue;

                sa_dev->port[i].sm_ah = NULL;
                sa_dev->port[i].port_num = i + s;

                spin_lock_init(&sa_dev->port[i].classport_lock);
                sa_dev->port[i].classport_info.valid = false;

                sa_dev->port[i].agent =
                        ib_register_mad_agent(device, i + s, IB_QPT_GSI,
                                              NULL, 0, send_handler,
                                              recv_handler, sa_dev, 0);
                if (IS_ERR(sa_dev->port[i].agent))
                        goto err;

                INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

                count++;
        }

        if (!count)
                goto free;

        ib_set_client_data(device, &sa_client, sa_dev);

        /*
         * We register our event handler after everything is set up,
         * and then update our cached info after the event handler is
         * registered to avoid any problems if a port changes state
         * during our initialization.
         */

        INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
        if (ib_register_event_handler(&sa_dev->event_handler))
                goto err;

        for (i = 0; i <= e - s; ++i) {
                if (rdma_cap_ib_sa(device, i + 1))
                        update_sm_ah(&sa_dev->port[i].update_task);
        }

        return;

err:
        while (--i >= 0) {
                if (rdma_cap_ib_sa(device, i + 1))
                        ib_unregister_mad_agent(sa_dev->port[i].agent);
        }
free:
        kfree(sa_dev);
        return;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
        struct ib_sa_device *sa_dev = client_data;
        int i;

        if (!sa_dev)
                return;

        ib_unregister_event_handler(&sa_dev->event_handler);

        flush_workqueue(ib_wq);

        for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
                if (rdma_cap_ib_sa(device, i + 1)) {
                        ib_unregister_mad_agent(sa_dev->port[i].agent);
                        if (sa_dev->port[i].sm_ah)
                                kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
                }
        }

        kfree(sa_dev);
}

int ib_sa_init(void)
{
        int ret;

        get_random_bytes(&tid, sizeof tid);

        ret = ib_register_client(&sa_client);
        if (ret) {
                pr_err("Couldn't register ib_sa client\n");
                goto err1;
        }

        ret = mcast_init();
        if (ret) {
                pr_err("Couldn't initialize multicast handling\n");
                goto err2;
        }

        return 0;

err2:
        ib_unregister_client(&sa_client);
err1:
        return ret;
}

void ib_sa_cleanup(void)
{
        mcast_cleanup();
        ib_unregister_client(&sa_client);
        idr_destroy(&query_idr);
}