/*
 * Copyright (c) 2004-2009 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2009 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

/*
 * Abstract:
 *    Implementation of osm_vendor_t (for umad).
 *    This object represents the OpenIB vendor layer.
 *    This object is part of the opensm family of objects.
 *
 * Environment:
 *    Linux User Mode
 *
 */

#if HAVE_CONFIG_H
#  include <config.h>
#endif				/* HAVE_CONFIG_H */

#ifdef OSM_VENDOR_INTF_OPENIB

#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>

#include <iba/ib_types.h>
#include <complib/cl_qlist.h>
#include <complib/cl_math.h>
#include <complib/cl_debug.h>
#include <opensm/osm_file_ids.h>
#define FILE_ID OSM_FILE_VENDOR_IBUMAD_C
#include <opensm/osm_madw.h>
#include <opensm/osm_log.h>
#include <opensm/osm_mad_pool.h>
#include <opensm/osm_helper.h>
#include <vendor/osm_vendor_api.h>

/****s* OpenSM: Vendor UMAD/osm_umad_bind_info_t
 * NAME
 *   osm_umad_bind_info_t
 *
 * DESCRIPTION
 *   Structure containing bind information.
 *
 * SYNOPSIS
 */
typedef struct _osm_umad_bind_info {
	osm_vendor_t *p_vend;
	void *client_context;
	osm_mad_pool_t *p_mad_pool;
	osm_vend_mad_recv_callback_t mad_recv_callback;
	osm_vend_mad_send_err_callback_t send_err_callback;
	ib_net64_t port_guid;
	int port_id;
	int agent_id;
	int agent_id1;		/* SMI requires two agents */
	int timeout;
	int max_retries;
} osm_umad_bind_info_t;

typedef struct _umad_receiver {
	pthread_t tid;
	osm_vendor_t *p_vend;
	osm_log_t *p_log;
} umad_receiver_t;

static void osm_vendor_close_port(osm_vendor_t * const p_vend);

static void log_send_error(osm_vendor_t * const p_vend, osm_madw_t *p_madw)
{
	if (p_madw->p_mad->mgmt_class != IB_MCLASS_SUBN_DIR) {
		/* LID routed */
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5410: "
			"Send completed with error (%s) -- dropping\n"
			"\t\t\tClass 0x%x, Method 0x%X, Attr 0x%X, "
			"TID 0x%" PRIx64 ", LID %u\n",
			ib_get_err_str(p_madw->status),
			p_madw->p_mad->mgmt_class, p_madw->p_mad->method,
			cl_ntoh16(p_madw->p_mad->attr_id),
			cl_ntoh64(p_madw->p_mad->trans_id),
			cl_ntoh16(p_madw->mad_addr.dest_lid));
	} else {
		ib_smp_t *p_smp;

		/* Direct routed SMP */
		p_smp = osm_madw_get_smp_ptr(p_madw);
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5411: "
			"DR SMP Send completed with error (%s) -- dropping\n"
			"\t\t\tMethod 0x%X, Attr 0x%X, TID 0x%" PRIx64 "\n",
			ib_get_err_str(p_madw->status),
			p_madw->p_mad->method,
			cl_ntoh16(p_madw->p_mad->attr_id),
			cl_ntoh64(p_madw->p_mad->trans_id));
		osm_dump_smp_dr_path(p_vend->p_log, p_smp, OSM_LOG_ERROR);
	}
}

static void clear_madw(osm_vendor_t * p_vend)
{
	umad_match_t *m, *e, *old_m;
	ib_net64_t old_tid;
	uint8_t old_mgmt_class;

	OSM_LOG_ENTER(p_vend->p_log);
	pthread_mutex_lock(&p_vend->match_tbl_mutex);
	for (m = p_vend->mtbl.tbl, e = m + p_vend->mtbl.max; m < e; m++) {
		if (m->tid) {
			old_m = m;
			old_tid = m->tid;
			old_mgmt_class = m->mgmt_class;
			m->tid = 0;
			osm_mad_pool_put(((osm_umad_bind_info_t
					   *) ((osm_madw_t *) m->v)->h_bind)->
					 p_mad_pool, m->v);
			pthread_mutex_unlock(&p_vend->match_tbl_mutex);
			OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5401: "
				"evicting entry %p (tid was 0x%" PRIx64
				" mgmt class 0x%x)\n",
				old_m, cl_ntoh64(old_tid), old_mgmt_class);
			goto Exit;
		}
	}
	pthread_mutex_unlock(&p_vend->match_tbl_mutex);

Exit:
	OSM_LOG_EXIT(p_vend->p_log);
}

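/*
 * Look up (and remove) the match-table entry for a previously sent
 * request. Only the low 32 bits of the TID are compared (note the
 * CL_HTON64 mask below); the high 32 bits are presumed to be rewritten
 * by the kernel MAD layer for its own request/response routing and are
 * therefore not stable between send and receive. On a hit the entry is
 * cleared and the original request MADW is returned.
 */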
static osm_madw_t *get_madw(osm_vendor_t * p_vend, ib_net64_t * tid,
			    uint8_t mgmt_class)
{
	umad_match_t *m, *e;
	ib_net64_t mtid = (*tid & CL_HTON64(0x00000000ffffffffULL));
	osm_madw_t *res;

	/*
	 * Since mtid == 0 is the empty key, we should not
	 * waste time looking for it
	 */
	if (mtid == 0 || mgmt_class == 0)
		return 0;

	pthread_mutex_lock(&p_vend->match_tbl_mutex);
	for (m = p_vend->mtbl.tbl, e = m + p_vend->mtbl.max; m < e; m++) {
		if (m->tid == mtid && m->mgmt_class == mgmt_class) {
			m->tid = 0;
			m->mgmt_class = 0;
			*tid = mtid;
			res = m->v;
			pthread_mutex_unlock(&p_vend->match_tbl_mutex);
			return res;
		}
	}

	pthread_mutex_unlock(&p_vend->match_tbl_mutex);
	return 0;
}

/*
 * If match table full, evict LRU (least recently used) transaction.
 * Maintain 2 LRUs: one for SMPs, and one for others (GS).
 * Evict LRU GS transaction if one is available and only evict LRU SMP
 * transaction if no other choice.
 */
static void
put_madw(osm_vendor_t * p_vend, osm_madw_t * p_madw, ib_net64_t tid,
	 uint8_t mgmt_class)
{
	umad_match_t *m, *e, *old_lru, *lru = 0, *lru_smp = 0;
	osm_madw_t *p_req_madw;
	osm_umad_bind_info_t *p_bind;
	ib_net64_t old_tid;
	uint32_t oldest = ~0, oldest_smp = ~0;
	uint8_t old_mgmt_class;

	pthread_mutex_lock(&p_vend->match_tbl_mutex);
	for (m = p_vend->mtbl.tbl, e = m + p_vend->mtbl.max; m < e; m++) {
		if (m->tid == 0 && m->mgmt_class == 0) {
			m->tid = tid;
			m->mgmt_class = mgmt_class;
			m->v = p_madw;
			m->version =
			    cl_atomic_inc((atomic32_t *) &p_vend->mtbl.last_version);
			pthread_mutex_unlock(&p_vend->match_tbl_mutex);
			return;
		}
		if (m->mgmt_class == IB_MCLASS_SUBN_DIR ||
		    m->mgmt_class == IB_MCLASS_SUBN_LID) {
			if (oldest_smp >= m->version) {
				oldest_smp = m->version;
				lru_smp = m;
			}
		} else {
			if (oldest >= m->version) {
				oldest = m->version;
				lru = m;
			}
		}
	}

	if (oldest != ~0) {
		old_lru = lru;
		old_tid = lru->tid;
		old_mgmt_class = lru->mgmt_class;
	} else {
		CL_ASSERT(oldest_smp != ~0);
		old_lru = lru_smp;
		old_tid = lru_smp->tid;
		old_mgmt_class = lru_smp->mgmt_class;
	}
	p_req_madw = old_lru->v;
	p_bind = p_req_madw->h_bind;
	p_req_madw->status = IB_CANCELED;
	log_send_error(p_vend, p_req_madw);
	pthread_mutex_lock(&p_vend->cb_mutex);
	(*p_bind->send_err_callback) (p_bind->client_context, p_req_madw);
	pthread_mutex_unlock(&p_vend->cb_mutex);
	if (mgmt_class == IB_MCLASS_SUBN_DIR ||
	    mgmt_class == IB_MCLASS_SUBN_LID) {
		lru_smp->tid = tid;
		lru_smp->mgmt_class = mgmt_class;
		lru_smp->v = p_madw;
		lru_smp->version =
		    cl_atomic_inc((atomic32_t *) &p_vend->mtbl.last_version);
	} else {
		lru->tid = tid;
		lru->mgmt_class = mgmt_class;
		lru->v = p_madw;
		lru->version =
		    cl_atomic_inc((atomic32_t *) &p_vend->mtbl.last_version);
	}
	pthread_mutex_unlock(&p_vend->match_tbl_mutex);
	OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5402: "
		"evicting entry %p (tid was 0x%" PRIx64
		" mgmt class 0x%x)\n", old_lru,
		cl_ntoh64(old_tid), old_mgmt_class);
}

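/*
 * Translate the address information of a received umad into an
 * osm_mad_addr_t. SMI MADs only need the source LID; GSI MADs also
 * carry the remote QP/Q_Key, P_Key index, SL and, when a GRH is
 * present, the global routing fields.
 */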
static void
ib_mad_addr_conv(ib_user_mad_t * umad, osm_mad_addr_t * osm_mad_addr,
		 int is_smi)
{
	ib_mad_addr_t *ib_mad_addr = umad_get_mad_addr(umad);

	memset(osm_mad_addr, 0, sizeof(osm_mad_addr_t));
	osm_mad_addr->dest_lid = ib_mad_addr->lid;
	osm_mad_addr->path_bits = ib_mad_addr->path_bits;

	if (is_smi) {
		osm_mad_addr->addr_type.smi.source_lid = osm_mad_addr->dest_lid;
		osm_mad_addr->addr_type.smi.port_num = 255;	/* not used */
		return;
	}

	osm_mad_addr->addr_type.gsi.remote_qp = ib_mad_addr->qpn;
	osm_mad_addr->addr_type.gsi.remote_qkey = ib_mad_addr->qkey;
	osm_mad_addr->addr_type.gsi.pkey_ix = umad_get_pkey(umad);
	osm_mad_addr->addr_type.gsi.service_level = ib_mad_addr->sl;
	if (ib_mad_addr->grh_present) {
		osm_mad_addr->addr_type.gsi.global_route = 1;
		osm_mad_addr->addr_type.gsi.grh_info.hop_limit = ib_mad_addr->hop_limit;
		osm_mad_addr->addr_type.gsi.grh_info.ver_class_flow =
		    ib_grh_set_ver_class_flow(6,	/* GRH version */
					      ib_mad_addr->traffic_class,
					      ib_mad_addr->flow_label);
		memcpy(&osm_mad_addr->addr_type.gsi.grh_info.dest_gid,
		       &ib_mad_addr->gid, 16);
	}
}

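/*
 * Hand the freshly received umad buffer over to the MAD wrapper and
 * take back the wrapper's previous buffer for reuse, so the receiver
 * never has to copy MAD payloads.
 */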
static void *swap_mad_bufs(osm_madw_t * p_madw, void *umad)
{
	void *old;

	old = p_madw->vend_wrap.umad;
	p_madw->vend_wrap.umad = umad;
	p_madw->p_mad = umad_get_mad(umad);

	return old;
}

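/*
 * pthread_cleanup_push() handler: guarantees the callback mutex is
 * released even if the receiver thread is cancelled while a client
 * callback is running.
 */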
static void unlock_mutex(void *arg)
{
	pthread_mutex_unlock(arg);
}

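/*
 * Receiver thread body: block in umad_recv(), reallocate a larger
 * buffer when an RMPP MAD does not fit in MAD_BLOCK_SIZE, convert the
 * umad into an osm_madw_t and dispatch it to the bound client's
 * receive or send-error callback.
 */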
static void *umad_receiver(void *p_ptr)
{
	umad_receiver_t *const p_ur = (umad_receiver_t *) p_ptr;
	osm_vendor_t *p_vend = p_ur->p_vend;
	osm_umad_bind_info_t *p_bind;
	osm_mad_addr_t osm_addr;
	osm_madw_t *p_madw, *p_req_madw;
	ib_mad_t *p_mad, *p_req_mad;
	void *umad = 0;
	int mad_agent, length;

	OSM_LOG_ENTER(p_ur->p_log);

	for (;;) {
		if (!umad &&
		    !(umad = umad_alloc(1, umad_size() + MAD_BLOCK_SIZE))) {
			OSM_LOG(p_ur->p_log, OSM_LOG_ERROR, "ERR 5403: "
				"can't alloc MAD sized umad\n");
			break;
		}

		length = MAD_BLOCK_SIZE;
		if ((mad_agent = umad_recv(p_vend->umad_port_id, umad,
					   &length, -1)) < 0) {
			if (length <= MAD_BLOCK_SIZE) {
				OSM_LOG(p_ur->p_log, OSM_LOG_ERROR, "ERR 5404: "
					"recv error on MAD sized umad (%m)\n");
				continue;
			} else {
				umad_free(umad);
				/* Need a larger buffer for RMPP */
				umad = umad_alloc(1, umad_size() + length);
				if (!umad) {
					OSM_LOG(p_ur->p_log, OSM_LOG_ERROR,
						"ERR 5405: "
						"can't alloc umad length %d\n",
						length);
					continue;
				}

				if ((mad_agent = umad_recv(p_vend->umad_port_id,
							   umad, &length,
							   -1)) < 0) {
					OSM_LOG(p_ur->p_log, OSM_LOG_ERROR,
						"ERR 5406: "
						"recv error on umad length %d (%m)\n",
						length);
					continue;
				}
			}
		}

		if (mad_agent >= OSM_UMAD_MAX_AGENTS ||
		    !(p_bind = p_vend->agents[mad_agent])) {
			OSM_LOG(p_ur->p_log, OSM_LOG_ERROR, "ERR 5407: "
				"invalid mad agent %d - dropping\n", mad_agent);
			continue;
		}

		p_mad = (ib_mad_t *) umad_get_mad(umad);

		ib_mad_addr_conv(umad, &osm_addr,
				 p_mad->mgmt_class == IB_MCLASS_SUBN_LID ||
				 p_mad->mgmt_class == IB_MCLASS_SUBN_DIR);

		if (!(p_madw = osm_mad_pool_get(p_bind->p_mad_pool,
						(osm_bind_handle_t) p_bind,
						MAX(length, MAD_BLOCK_SIZE),
						&osm_addr))) {
			OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5408: "
				"request for a new madw failed -- dropping packet\n");
			continue;
		}

		/* Need to fix up MAD size if short RMPP packet */
		if (length < MAD_BLOCK_SIZE)
			p_madw->mad_size = length;

		/*
		 * Avoid copying by swapping mad buf pointers.
		 * Do not use umad after this line of code.
		 */
		umad = swap_mad_bufs(p_madw, umad);

		/* if status != 0 then we are handling recv timeout on send */
		if (umad_status(p_madw->vend_wrap.umad)) {
			if (!(p_req_madw = get_madw(p_vend, &p_mad->trans_id,
						    p_mad->mgmt_class))) {
				OSM_LOG(p_vend->p_log, OSM_LOG_ERROR,
					"ERR 5412: "
					"Failed to obtain request madw for timed out MAD"
					" (class=0x%X method=0x%X attr=0x%X tid=0x%" PRIx64 ") -- dropping\n",
					p_mad->mgmt_class, p_mad->method,
					cl_ntoh16(p_mad->attr_id),
					cl_ntoh64(p_mad->trans_id));
			} else {
				p_req_madw->status = IB_TIMEOUT;
				log_send_error(p_vend, p_req_madw);
				/* cb frees req_madw */
				pthread_mutex_lock(&p_vend->cb_mutex);
				pthread_cleanup_push(unlock_mutex,
						     &p_vend->cb_mutex);
				(*p_bind->send_err_callback) (p_bind->
							      client_context,
							      p_req_madw);
				pthread_cleanup_pop(1);
			}

			osm_mad_pool_put(p_bind->p_mad_pool, p_madw);
			continue;
		}

		p_req_madw = 0;
		if (ib_mad_is_response(p_mad)) {
			p_req_madw = get_madw(p_vend, &p_mad->trans_id,
					      p_mad->mgmt_class);
			if (PF(!p_req_madw)) {
				OSM_LOG(p_vend->p_log, OSM_LOG_ERROR,
					"ERR 5413: Failed to obtain request "
					"madw for received MAD "
					"(class=0x%X method=0x%X attr=0x%X "
					"tid=0x%" PRIx64 ") -- dropping\n",
					p_mad->mgmt_class, p_mad->method,
					cl_ntoh16(p_mad->attr_id),
					cl_ntoh64(p_mad->trans_id));
				osm_mad_pool_put(p_bind->p_mad_pool, p_madw);
				continue;
			}

			/*
			 * Check that request MAD was really a request,
			 * and make sure that attribute ID, attribute
			 * modifier and transaction ID are the same in
			 * request and response.
			 *
			 * Exception for o15-0.2-1.11:
			 * SA response to a SubnAdmGetMulti() containing a
			 * MultiPathRecord shall have PathRecord attribute ID.
			 */
			p_req_mad = osm_madw_get_mad_ptr(p_req_madw);
			if (PF(ib_mad_is_response(p_req_mad) ||
			       (p_mad->attr_id != p_req_mad->attr_id &&
				!(p_mad->mgmt_class == IB_MCLASS_SUBN_ADM &&
				  p_req_mad->attr_id ==
				  IB_MAD_ATTR_MULTIPATH_RECORD &&
				  p_mad->attr_id == IB_MAD_ATTR_PATH_RECORD)) ||
			       p_mad->attr_mod != p_req_mad->attr_mod ||
			       p_mad->trans_id != p_req_mad->trans_id)) {
				OSM_LOG(p_vend->p_log, OSM_LOG_ERROR,
					"ERR 541A: "
					"Response MAD validation failed "
					"(request attr=0x%X modif=0x%X "
					"tid=0x%" PRIx64 ", "
					"response attr=0x%X modif=0x%X "
					"tid=0x%" PRIx64 ") -- dropping\n",
					cl_ntoh16(p_req_mad->attr_id),
					cl_ntoh32(p_req_mad->attr_mod),
					cl_ntoh64(p_req_mad->trans_id),
					cl_ntoh16(p_mad->attr_id),
					cl_ntoh32(p_mad->attr_mod),
					cl_ntoh64(p_mad->trans_id));
				osm_mad_pool_put(p_bind->p_mad_pool, p_madw);
				continue;
			}
		}

#ifndef VENDOR_RMPP_SUPPORT
		if ((p_mad->mgmt_class != IB_MCLASS_SUBN_DIR) &&
		    (p_mad->mgmt_class != IB_MCLASS_SUBN_LID) &&
		    (ib_rmpp_is_flag_set((ib_rmpp_mad_t *) p_mad,
					 IB_RMPP_FLAG_ACTIVE))) {
			OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5414: "
				"class 0x%x method 0x%x RMPP version %d type "
				"%d flags 0x%x received -- dropping\n",
				p_mad->mgmt_class, p_mad->method,
				((ib_rmpp_mad_t *) p_mad)->rmpp_version,
				((ib_rmpp_mad_t *) p_mad)->rmpp_type,
				((ib_rmpp_mad_t *) p_mad)->rmpp_flags);
			osm_mad_pool_put(p_bind->p_mad_pool, p_madw);
			continue;
		}
#endif

		/* call the CB */
		pthread_mutex_lock(&p_vend->cb_mutex);
		pthread_cleanup_push(unlock_mutex, &p_vend->cb_mutex);
		(*p_bind->mad_recv_callback) (p_madw, p_bind->client_context,
					      p_req_madw);
		pthread_cleanup_pop(1);
	}

	OSM_LOG_EXIT(p_ur->p_log);
	return NULL;
}

static int umad_receiver_start(osm_vendor_t * p_vend)
{
	umad_receiver_t *p_ur = p_vend->receiver;

	p_ur->p_vend = p_vend;
	p_ur->p_log = p_vend->p_log;

	if (pthread_create(&p_ur->tid, NULL, umad_receiver, p_ur) != 0)
		return -1;

	return 0;
}

static void umad_receiver_stop(umad_receiver_t * p_ur)
{
	pthread_cancel(p_ur->tid);
	pthread_join(p_ur->tid, NULL);
	p_ur->tid = 0;
	p_ur->p_vend = NULL;
	p_ur->p_log = NULL;
}

ib_api_status_t
osm_vendor_init(IN osm_vendor_t * const p_vend,
		IN osm_log_t * const p_log, IN const uint32_t timeout)
{
	char *max = NULL;
	int r, n_cas;

	OSM_LOG_ENTER(p_log);

	p_vend->p_log = p_log;
	p_vend->timeout = timeout;
	p_vend->max_retries = OSM_DEFAULT_RETRY_COUNT;
	pthread_mutex_init(&p_vend->cb_mutex, NULL);
	pthread_mutex_init(&p_vend->match_tbl_mutex, NULL);
	p_vend->umad_port_id = -1;
	p_vend->issmfd = -1;

	/*
	 * Open our instance of UMAD.
	 */
	if ((r = umad_init()) < 0) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR,
			"ERR 5415: Error opening UMAD\n");
	}

	if ((n_cas = umad_get_cas_names(p_vend->ca_names,
					OSM_UMAD_MAX_CAS)) < 0) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR,
			"ERR 5416: umad_get_cas_names failed\n");
		r = n_cas;
		goto Exit;
	}

	p_vend->ca_count = n_cas;
	p_vend->mtbl.max = DEFAULT_OSM_UMAD_MAX_PENDING;

	if ((max = getenv("OSM_UMAD_MAX_PENDING")) != NULL) {
		int tmp = strtol(max, NULL, 0);
		if (tmp > 0)
			p_vend->mtbl.max = tmp;
		else
			OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "Error:"
				"OSM_UMAD_MAX_PENDING=%d is invalid\n",
				tmp);
	}

	OSM_LOG(p_vend->p_log, OSM_LOG_INFO, "%d pending umads specified\n",
		p_vend->mtbl.max);

	p_vend->mtbl.tbl = calloc(p_vend->mtbl.max, sizeof(*(p_vend->mtbl.tbl)));
	if (!p_vend->mtbl.tbl) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "Error:"
			"failed to allocate vendor match table\n");
		r = IB_INSUFFICIENT_MEMORY;
		goto Exit;
	}

Exit:
	OSM_LOG_EXIT(p_log);
	return (r);
}

osm_vendor_t *osm_vendor_new(IN osm_log_t * const p_log,
			     IN const uint32_t timeout)
{
	osm_vendor_t *p_vend = NULL;

	OSM_LOG_ENTER(p_log);

	if (!timeout) {
		OSM_LOG(p_log, OSM_LOG_ERROR, "ERR 5433: "
			"transaction timeout cannot be 0\n");
		goto Exit;
	}

	p_vend = malloc(sizeof(*p_vend));
	if (p_vend == NULL) {
		OSM_LOG(p_log, OSM_LOG_ERROR, "ERR 5417: "
			"Unable to allocate vendor object\n");
		goto Exit;
	}

	memset(p_vend, 0, sizeof(*p_vend));

	if (osm_vendor_init(p_vend, p_log, timeout) != IB_SUCCESS) {
		free(p_vend);
		p_vend = NULL;
	}

Exit:
	OSM_LOG_EXIT(p_log);
	return (p_vend);
}

void osm_vendor_delete(IN osm_vendor_t ** const pp_vend)
{
	osm_vendor_close_port(*pp_vend);

	clear_madw(*pp_vend);
	/* make sure all ports are closed */
	umad_done();

	pthread_mutex_destroy(&(*pp_vend)->cb_mutex);
	pthread_mutex_destroy(&(*pp_vend)->match_tbl_mutex);
	free((*pp_vend)->mtbl.tbl);
	free(*pp_vend);
	*pp_vend = NULL;
}

ib_api_status_t
osm_vendor_get_all_port_attr(IN osm_vendor_t * const p_vend,
			     IN ib_port_attr_t * const p_attr_array,
			     IN uint32_t * const p_num_ports)
{
	umad_ca_t ca;
	ib_port_attr_t *attr = p_attr_array;
	unsigned done = 0;
	int r = 0, i, j, k;

	OSM_LOG_ENTER(p_vend->p_log);

	CL_ASSERT(p_vend && p_num_ports);

	if (!*p_num_ports) {
		r = IB_INVALID_PARAMETER;
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5418: "
			"Ports in should be > 0\n");
		goto Exit;
	}

	if (!p_attr_array) {
		r = IB_INSUFFICIENT_MEMORY;
		*p_num_ports = 0;
		goto Exit;
	}

	for (i = 0; i < p_vend->ca_count && !done; i++) {
		/* For each CA, retrieve the port attributes */
		if (umad_get_ca(p_vend->ca_names[i], &ca) == 0) {
			if (ca.node_type < 1 || ca.node_type > 3)
				continue;
			for (j = 0; j <= ca.numports; j++) {
				if (!ca.ports[j])
					continue;
				attr->port_guid = ca.ports[j]->port_guid;
				attr->lid = ca.ports[j]->base_lid;
				attr->port_num = ca.ports[j]->portnum;
				attr->sm_lid = ca.ports[j]->sm_lid;
				attr->sm_sl = ca.ports[j]->sm_sl;
				attr->link_state = ca.ports[j]->state;
				if (attr->num_pkeys && attr->p_pkey_table) {
					if (attr->num_pkeys > ca.ports[j]->pkeys_size)
						attr->num_pkeys = ca.ports[j]->pkeys_size;
					for (k = 0; k < attr->num_pkeys; k++)
						attr->p_pkey_table[k] =
						    cl_hton16(ca.ports[j]->pkeys[k]);
				}
				attr->num_pkeys = ca.ports[j]->pkeys_size;
				if (attr->num_gids && attr->p_gid_table) {
					attr->p_gid_table[0].unicast.prefix = cl_hton64(ca.ports[j]->gid_prefix);
					attr->p_gid_table[0].unicast.interface_id = cl_hton64(ca.ports[j]->port_guid);
					attr->num_gids = 1;
				}
				attr++;
				if (attr - p_attr_array > *p_num_ports) {
					done = 1;
					break;
				}
			}
			umad_release_ca(&ca);
		}
	}

	*p_num_ports = attr - p_attr_array;

Exit:
	OSM_LOG_EXIT(p_vend->p_log);
	return r;
}

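/*
 * Find the local CA/port that owns port_guid (or let libibumad pick a
 * default port when port_guid is 0), open it through libibumad and
 * start the receiver thread. Returns the umad port id, or a negative
 * value on failure.
 */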
static int
osm_vendor_open_port(IN osm_vendor_t * const p_vend,
		     IN const ib_net64_t port_guid)
{
	ib_net64_t portguids[OSM_UMAD_MAX_PORTS_PER_CA + 1];
	umad_ca_t umad_ca;
	int i = 0, umad_port_id = -1;
	char *name;
	int ca, r;

	CL_ASSERT(p_vend);

	OSM_LOG_ENTER(p_vend->p_log);

	if (p_vend->umad_port_id >= 0) {
		umad_port_id = p_vend->umad_port_id;
		goto Exit;
	}

	if (!port_guid) {
		name = NULL;
		i = 0;
		goto _found;
	}

	for (ca = 0; ca < p_vend->ca_count; ca++) {
		if ((r = umad_get_ca_portguids(p_vend->ca_names[ca], portguids,
					       OSM_UMAD_MAX_PORTS_PER_CA + 1)) < 0) {
#ifdef __WIN__
			OSM_LOG(p_vend->p_log, OSM_LOG_VERBOSE,
#else
			OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5421: "
#endif
				"Unable to get CA %s port guids (%s)\n",
				p_vend->ca_names[ca], strerror(r));
			continue;
		}
		for (i = 0; i < r; i++)
			if (port_guid == portguids[i]) {
				name = p_vend->ca_names[ca];
				goto _found;
			}
	}

	/*
	 * No local CA owns this guid!
	 */
	OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5422: "
		"Unable to find requested CA guid 0x%" PRIx64 "\n",
		cl_ntoh64(port_guid));
	goto Exit;

_found:
	/* Validate that node is an IB node type (not iWARP) */
	if (umad_get_ca(name, &umad_ca) < 0) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 542A: "
			"umad_get_ca() failed\n");
		goto Exit;
	}

	if (umad_ca.node_type < 1 || umad_ca.node_type > 3) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 542D: "
			"Type %d of node \'%s\' is not an IB node type\n",
			umad_ca.node_type, umad_ca.ca_name);
		fprintf(stderr,
			"Type %d of node \'%s\' is not an IB node type\n",
			umad_ca.node_type, umad_ca.ca_name);
		umad_release_ca(&umad_ca);
		goto Exit;
	}
	umad_release_ca(&umad_ca);

	/* Port found, try to open it */
	if (umad_get_port(name, i, &p_vend->umad_port) < 0) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 542B: "
			"umad_get_port() failed\n");
		goto Exit;
	}

	if ((umad_port_id = umad_open_port(p_vend->umad_port.ca_name,
					   p_vend->umad_port.portnum)) < 0) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 542C: "
			"umad_open_port() failed\n");
		goto Exit;
	}

	p_vend->umad_port_id = umad_port_id;

	/* start receiver thread */
	if (!(p_vend->receiver = calloc(1, sizeof(umad_receiver_t)))) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5423: "
			"Unable to alloc receiver struct\n");
		umad_close_port(umad_port_id);
		umad_release_port(&p_vend->umad_port);
		p_vend->umad_port.port_guid = 0;
		p_vend->umad_port_id = umad_port_id = -1;
		goto Exit;
	}
	if (umad_receiver_start(p_vend) != 0) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5420: "
			"umad_receiver_init failed\n");
		umad_close_port(umad_port_id);
		umad_release_port(&p_vend->umad_port);
		p_vend->umad_port.port_guid = 0;
		p_vend->umad_port_id = umad_port_id = -1;
	}

Exit:
	OSM_LOG_EXIT(p_vend->p_log);
	return umad_port_id;
}

static void osm_vendor_close_port(osm_vendor_t * const p_vend)
{
	umad_receiver_t *p_ur;
	int i;

	p_ur = p_vend->receiver;
	p_vend->receiver = NULL;
	if (p_ur) {
		umad_receiver_stop(p_ur);
		free(p_ur);
	}

	if (p_vend->umad_port_id >= 0) {
		for (i = 0; i < OSM_UMAD_MAX_AGENTS; i++)
			if (p_vend->agents[i])
				umad_unregister(p_vend->umad_port_id, i);
		umad_close_port(p_vend->umad_port_id);
		umad_release_port(&p_vend->umad_port);
		p_vend->umad_port.port_guid = 0;
		p_vend->umad_port_id = -1;
	}
}

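/*
 * Set bit 'nr' in the umad method mask, which umad_register() takes as
 * an array of longs. Illustrative example (assuming a 64-bit long; the
 * method value 0x42 is hypothetical):
 *
 *	word = nr / (8 * sizeof(long));		// 0x42 / 64 == 1
 *	bit  = nr % (8 * sizeof(long));		// 0x42 % 64 == 2
 *	mask[word] |= 1L << bit;
 *
 * The previous value of the bit is returned.
 */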
static int set_bit(int nr, void *method_mask)
{
	long mask, *addr = method_mask;
	int retval;

	addr += nr / (8 * sizeof(long));
	mask = 1L << (nr % (8 * sizeof(long)));
	retval = (mask & *addr) != 0;
	*addr |= mask;
	return retval;
}

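/*
 * Illustrative bind/send sketch (not part of this file; error handling
 * omitted, and everything apart from the vendor API calls and the
 * osm_bind_info_t fields used below is hypothetical):
 *
 *	osm_bind_info_t info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.port_guid = port_guid;
 *	info.mad_class = IB_MCLASS_SUBN_ADM;
 *	info.class_version = 2;
 *	h_bind = osm_vendor_bind(p_vend, &info, p_mad_pool,
 *				 my_recv_cb, my_send_err_cb, context);
 *	...
 *	osm_vendor_send(h_bind, p_madw, TRUE);	// TRUE: response expected
 */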
osm_bind_handle_t
osm_vendor_bind(IN osm_vendor_t * const p_vend,
		IN osm_bind_info_t * const p_user_bind,
		IN osm_mad_pool_t * const p_mad_pool,
		IN osm_vend_mad_recv_callback_t mad_recv_callback,
		IN osm_vend_mad_send_err_callback_t send_err_callback,
		IN void *context)
{
	ib_net64_t port_guid;
	osm_umad_bind_info_t *p_bind = 0;
	long method_mask[16 / sizeof(long)];
	int umad_port_id;
	uint8_t rmpp_version;

	OSM_LOG_ENTER(p_vend->p_log);

	CL_ASSERT(p_user_bind);
	CL_ASSERT(p_mad_pool);
	CL_ASSERT(mad_recv_callback);
	CL_ASSERT(send_err_callback);

	port_guid = p_user_bind->port_guid;

	OSM_LOG(p_vend->p_log, OSM_LOG_INFO,
		"Mgmt class 0x%02x binding to port GUID 0x%" PRIx64 "\n",
		p_user_bind->mad_class, cl_ntoh64(port_guid));

	if ((umad_port_id = osm_vendor_open_port(p_vend, port_guid)) < 0) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5424: "
			"Unable to open port 0x%" PRIx64 "\n",
			cl_ntoh64(port_guid));
		goto Exit;
	}

	if (umad_get_issm_path(p_vend->umad_port.ca_name,
			       p_vend->umad_port.portnum,
			       p_vend->issm_path,
			       sizeof(p_vend->issm_path)) < 0) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 542E: "
			"Cannot resolve issm path for port %s:%u\n",
			p_vend->umad_port.ca_name, p_vend->umad_port.portnum);
		goto Exit;
	}

	if (!(p_bind = malloc(sizeof(*p_bind)))) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5425: "
			"Unable to allocate internal bind object\n");
		goto Exit;
	}

	memset(p_bind, 0, sizeof(*p_bind));
	p_bind->p_vend = p_vend;
	p_bind->port_id = umad_port_id;
	p_bind->client_context = context;
	p_bind->mad_recv_callback = mad_recv_callback;
	p_bind->send_err_callback = send_err_callback;
	p_bind->p_mad_pool = p_mad_pool;
	p_bind->port_guid = port_guid;
	p_bind->timeout = p_user_bind->timeout ? p_user_bind->timeout :
	    p_vend->timeout;
	p_bind->max_retries = p_user_bind->retries ? p_user_bind->retries :
	    p_vend->max_retries;

	memset(method_mask, 0, sizeof method_mask);
	if (p_user_bind->is_responder) {
		set_bit(IB_MAD_METHOD_GET, &method_mask);
		set_bit(IB_MAD_METHOD_SET, &method_mask);
		if (p_user_bind->mad_class == IB_MCLASS_SUBN_ADM) {
			set_bit(IB_MAD_METHOD_GETTABLE, &method_mask);
			set_bit(IB_MAD_METHOD_DELETE, &method_mask);
#ifdef DUAL_SIDED_RMPP
			set_bit(IB_MAD_METHOD_GETMULTI, &method_mask);
#endif
			/* Add in IB_MAD_METHOD_GETTRACETABLE */
			/* when supported by OpenSM */
		}
	}
	if (p_user_bind->is_report_processor)
		set_bit(IB_MAD_METHOD_REPORT, &method_mask);
	if (p_user_bind->is_trap_processor) {
		set_bit(IB_MAD_METHOD_TRAP, &method_mask);
		set_bit(IB_MAD_METHOD_TRAP_REPRESS, &method_mask);
	}
#ifndef VENDOR_RMPP_SUPPORT
	rmpp_version = 0;
#else
	/* If SA class, set rmpp_version */
	if (p_user_bind->mad_class == IB_MCLASS_SUBN_ADM)
		rmpp_version = 1;
	else
		rmpp_version = 0;
#endif

	if ((p_bind->agent_id = umad_register(p_vend->umad_port_id,
					      p_user_bind->mad_class,
					      p_user_bind->class_version,
					      rmpp_version, method_mask)) < 0) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5426: "
			"Unable to register class %u version %u\n",
			p_user_bind->mad_class, p_user_bind->class_version);
		free(p_bind);
		p_bind = 0;
		goto Exit;
	}

	if (p_bind->agent_id >= OSM_UMAD_MAX_AGENTS ||
	    p_vend->agents[p_bind->agent_id]) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5427: "
			"bad agent id %u or duplicate agent for class %u vers %u\n",
			p_bind->agent_id, p_user_bind->mad_class,
			p_user_bind->class_version);
		free(p_bind);
		p_bind = 0;
		goto Exit;
	}

	p_vend->agents[p_bind->agent_id] = p_bind;

	/* If Subn Directed Route class, register Subn LID routed class */
	if (p_user_bind->mad_class == IB_MCLASS_SUBN_DIR) {
		if ((p_bind->agent_id1 = umad_register(p_vend->umad_port_id,
						       IB_MCLASS_SUBN_LID,
						       p_user_bind->class_version,
						       0, method_mask)) < 0) {
			OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5428: "
				"Unable to register class 1 version %u\n",
				p_user_bind->class_version);
			free(p_bind);
			p_bind = 0;
			goto Exit;
		}

		if (p_bind->agent_id1 >= OSM_UMAD_MAX_AGENTS ||
		    p_vend->agents[p_bind->agent_id1]) {
			OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5429: "
				"bad agent id %u or duplicate agent for class 1 vers %u\n",
				p_bind->agent_id1, p_user_bind->class_version);
			free(p_bind);
			p_bind = 0;
			goto Exit;
		}

		p_vend->agents[p_bind->agent_id1] = p_bind;
	}

Exit:
	OSM_LOG_EXIT(p_vend->p_log);
	return ((osm_bind_handle_t) p_bind);
}

static void
__osm_vendor_recv_dummy_cb(IN osm_madw_t * p_madw,
			   IN void *bind_context, IN osm_madw_t * p_req_madw)
{
#ifdef _DEBUG_
	fprintf(stderr,
		"__osm_vendor_recv_dummy_cb: Ignoring received MAD after osm_vendor_unbind\n");
#endif
}

static void
__osm_vendor_send_err_dummy_cb(IN void *bind_context,
			       IN osm_madw_t * p_req_madw)
{
#ifdef _DEBUG_
	fprintf(stderr,
		"__osm_vendor_send_err_dummy_cb: Ignoring send error after osm_vendor_unbind\n");
#endif
}

void osm_vendor_unbind(IN osm_bind_handle_t h_bind)
{
	osm_umad_bind_info_t *p_bind = (osm_umad_bind_info_t *) h_bind;
	osm_vendor_t *p_vend = p_bind->p_vend;

	OSM_LOG_ENTER(p_vend->p_log);

	pthread_mutex_lock(&p_vend->cb_mutex);
	p_bind->mad_recv_callback = __osm_vendor_recv_dummy_cb;
	p_bind->send_err_callback = __osm_vendor_send_err_dummy_cb;
	pthread_mutex_unlock(&p_vend->cb_mutex);

	OSM_LOG_EXIT(p_vend->p_log);
}

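/*
 * Allocate a umad buffer large enough for a MAD of mad_size bytes plus
 * the umad header and return a pointer to the MAD portion. The buffer
 * is released through osm_vendor_put() (or recycled by the receiver
 * when buffers are swapped).
 */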
ib_mad_t *osm_vendor_get(IN osm_bind_handle_t h_bind,
			 IN const uint32_t mad_size,
			 IN osm_vend_wrap_t * const p_vw)
{
	osm_umad_bind_info_t *p_bind = (osm_umad_bind_info_t *) h_bind;
	osm_vendor_t *p_vend = p_bind->p_vend;

	OSM_LOG_ENTER(p_vend->p_log);

	OSM_LOG(p_vend->p_log, OSM_LOG_DEBUG,
		"Acquiring UMAD for p_madw = %p, size = %u\n", p_vw, mad_size);
	CL_ASSERT(p_vw);
	p_vw->size = mad_size;
	p_vw->umad = umad_alloc(1, mad_size + umad_size());

	/* track locally */
	p_vw->h_bind = h_bind;

	OSM_LOG(p_vend->p_log, OSM_LOG_DEBUG,
		"Acquired UMAD %p, size = %u\n", p_vw->umad, p_vw->size);

	OSM_LOG_EXIT(p_vend->p_log);
	return (p_vw->umad ? umad_get_mad(p_vw->umad) : NULL);
}

void
osm_vendor_put(IN osm_bind_handle_t h_bind, IN osm_vend_wrap_t * const p_vw)
{
	osm_umad_bind_info_t *p_bind = (osm_umad_bind_info_t *) h_bind;
	osm_vendor_t *p_vend = p_bind->p_vend;
	osm_madw_t *p_madw;

	OSM_LOG_ENTER(p_vend->p_log);

	CL_ASSERT(p_vw);

	OSM_LOG(p_vend->p_log, OSM_LOG_DEBUG, "Retiring UMAD %p\n", p_vw->umad);

	/*
	 * We moved the removal of the transaction to immediately after
	 * it was looked up.
	 */

	/* free the mad but the wrapper is part of the madw object */
	umad_free(p_vw->umad);
	p_vw->umad = 0;
	p_madw = PARENT_STRUCT(p_vw, osm_madw_t, vend_wrap);
	p_madw->p_mad = NULL;

	OSM_LOG_EXIT(p_vend->p_log);
}

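/*
 * Send a single MAD: set the umad destination address according to the
 * management class (permissive LID for directed-route SMPs, the
 * wrapper's destination LID for LID-routed SMPs, plus QP/Q_Key, P_Key
 * and optional GRH for GS classes), normalize the RMPP header, and
 * register the transaction in the match table via put_madw() when a
 * response is expected.
 */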
ib_api_status_t
osm_vendor_send(IN osm_bind_handle_t h_bind,
		IN osm_madw_t * const p_madw, IN boolean_t const resp_expected)
{
	osm_umad_bind_info_t *const p_bind = h_bind;
	osm_vendor_t *const p_vend = p_bind->p_vend;
	osm_vend_wrap_t *const p_vw = osm_madw_get_vend_ptr(p_madw);
	osm_mad_addr_t *const p_mad_addr = osm_madw_get_mad_addr_ptr(p_madw);
	ib_mad_t *const p_mad = osm_madw_get_mad_ptr(p_madw);
	ib_sa_mad_t *const p_sa = (ib_sa_mad_t *) p_mad;
	ib_mad_addr_t mad_addr;
	int ret = -1;
	int __attribute__((__unused__)) is_rmpp = 0;
	uint32_t sent_mad_size;
	uint64_t tid;
#ifndef VENDOR_RMPP_SUPPORT
	uint32_t paylen = 0;
#endif

	OSM_LOG_ENTER(p_vend->p_log);

	CL_ASSERT(p_vw->h_bind == h_bind);
	CL_ASSERT(p_mad == umad_get_mad(p_vw->umad));

	if (p_mad->mgmt_class == IB_MCLASS_SUBN_DIR) {
		umad_set_addr_net(p_vw->umad, 0xffff, 0, 0, 0);
		umad_set_grh(p_vw->umad, NULL);
		goto Resp;
	}
	if (p_mad->mgmt_class == IB_MCLASS_SUBN_LID) {
		umad_set_addr_net(p_vw->umad, p_mad_addr->dest_lid, 0, 0, 0);
		umad_set_grh(p_vw->umad, NULL);
		goto Resp;
	}
	/* GS classes */
	umad_set_addr_net(p_vw->umad, p_mad_addr->dest_lid,
			  p_mad_addr->addr_type.gsi.remote_qp,
			  p_mad_addr->addr_type.gsi.service_level,
			  IB_QP1_WELL_KNOWN_Q_KEY);
	if (p_mad_addr->addr_type.gsi.global_route) {
		mad_addr.grh_present = 1;
		mad_addr.gid_index = 0;
		mad_addr.hop_limit = p_mad_addr->addr_type.gsi.grh_info.hop_limit;
		ib_grh_get_ver_class_flow(p_mad_addr->addr_type.gsi.grh_info.ver_class_flow,
					  NULL, &mad_addr.traffic_class,
					  &mad_addr.flow_label);
		memcpy(&mad_addr.gid, &p_mad_addr->addr_type.gsi.grh_info.dest_gid, 16);
		umad_set_grh(p_vw->umad, &mad_addr);
	} else
		umad_set_grh(p_vw->umad, NULL);
	umad_set_pkey(p_vw->umad, p_mad_addr->addr_type.gsi.pkey_ix);
	if (ib_class_is_rmpp(p_mad->mgmt_class)) {	/* RMPP GS classes */
		if (!ib_rmpp_is_flag_set((ib_rmpp_mad_t *) p_sa,
					 IB_RMPP_FLAG_ACTIVE)) {
			/* Clear RMPP header when RMPP not ACTIVE */
			p_sa->rmpp_version = 0;
			p_sa->rmpp_type = 0;
			p_sa->rmpp_flags = 0;
			p_sa->rmpp_status = 0;
#ifdef VENDOR_RMPP_SUPPORT
		} else
			is_rmpp = 1;
		OSM_LOG(p_vend->p_log, OSM_LOG_DEBUG, "RMPP %d length %d\n",
			ib_rmpp_is_flag_set((ib_rmpp_mad_t *) p_sa,
					    IB_RMPP_FLAG_ACTIVE),
			p_madw->mad_size);
#else
		} else {
			p_sa->rmpp_version = 1;
			p_sa->seg_num = cl_ntoh32(1);	/* first DATA is seg 1 */
			p_sa->rmpp_flags |= (uint8_t) 0x70;	/* RRespTime of 14 (high 5 bits) */
			p_sa->rmpp_status = 0;
			paylen = p_madw->mad_size - IB_SA_MAD_HDR_SIZE;
			paylen += (IB_SA_MAD_HDR_SIZE - MAD_RMPP_HDR_SIZE);
			p_sa->paylen_newwin = cl_ntoh32(paylen);
		}
#endif
	}

Resp:
	if (resp_expected)
		put_madw(p_vend, p_madw, p_mad->trans_id, p_mad->mgmt_class);

#ifdef VENDOR_RMPP_SUPPORT
	sent_mad_size = p_madw->mad_size;
#else
	sent_mad_size = is_rmpp ? p_madw->mad_size - IB_SA_MAD_HDR_SIZE :
	    p_madw->mad_size;
#endif
	tid = cl_ntoh64(p_mad->trans_id);
	if ((ret = umad_send(p_bind->port_id, p_bind->agent_id, p_vw->umad,
			     sent_mad_size,
			     resp_expected ? p_bind->timeout : 0,
			     p_bind->max_retries)) < 0) {
		OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5430: "
			"Send p_madw = %p of size %d, Class 0x%x, Method 0x%X, "
			"Attr 0x%X, TID 0x%" PRIx64 " failed %d (%m)\n",
			p_madw, sent_mad_size, p_mad->mgmt_class,
			p_mad->method, cl_ntoh16(p_mad->attr_id), tid, ret);
		if (resp_expected) {
			get_madw(p_vend, &p_mad->trans_id,
				 p_mad->mgmt_class);	/* remove from aging table */
			p_madw->status = IB_ERROR;
			pthread_mutex_lock(&p_vend->cb_mutex);
			(*p_bind->send_err_callback) (p_bind->client_context, p_madw);	/* cb frees madw */
			pthread_mutex_unlock(&p_vend->cb_mutex);
		} else
			osm_mad_pool_put(p_bind->p_mad_pool, p_madw);
		goto Exit;
	}

	if (!resp_expected)
		osm_mad_pool_put(p_bind->p_mad_pool, p_madw);

	OSM_LOG(p_vend->p_log, OSM_LOG_DEBUG, "Completed sending %s TID 0x%" PRIx64 "\n",
		resp_expected ? "request" : "response or unsolicited", tid);
Exit:
	OSM_LOG_EXIT(p_vend->p_log);
	return (ret);
}

ib_api_status_t osm_vendor_local_lid_change(IN osm_bind_handle_t h_bind)
{
	osm_umad_bind_info_t *p_bind = (osm_umad_bind_info_t *) h_bind;
	osm_vendor_t *p_vend = p_bind->p_vend;

	OSM_LOG_ENTER(p_vend->p_log);
	;
	OSM_LOG_EXIT(p_vend->p_log);
	return (0);
}

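/*
 * Toggle the port's IsSM capability bit. With the umad interface this
 * is presumably done by holding the per-port "issm" special file open:
 * the file is opened when is_sm_val is TRUE and closed again when it
 * is FALSE.
 */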
void osm_vendor_set_sm(IN osm_bind_handle_t h_bind, IN boolean_t is_sm_val)
{
	osm_umad_bind_info_t *p_bind = (osm_umad_bind_info_t *) h_bind;
	osm_vendor_t *p_vend = p_bind->p_vend;

	OSM_LOG_ENTER(p_vend->p_log);
	if (TRUE == is_sm_val) {
		p_vend->issmfd = open(p_vend->issm_path, O_NONBLOCK);
		if (p_vend->issmfd < 0) {
			OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5431: "
				"setting IS_SM capmask: cannot open file "
				"\'%s\': %s\n",
				p_vend->issm_path, strerror(errno));
			p_vend->issmfd = -1;
		}
	} else if (p_vend->issmfd != -1) {
		if (0 != close(p_vend->issmfd))
			OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5432: "
				"clearing IS_SM capmask: cannot close: %s\n",
				strerror(errno));
		p_vend->issmfd = -1;
	}
	OSM_LOG_EXIT(p_vend->p_log);
}

void osm_vendor_set_debug(IN osm_vendor_t * const p_vend, IN int32_t level)
{
	umad_debug(level);
}

#endif				/* OSM_VENDOR_INTF_OPENIB */