1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * This RCM module adds support to the RCM framework for IBPART links
27 */
28
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <sys/types.h>
34 #include <synch.h>
35 #include <assert.h>
36 #include <strings.h>
37 #include "rcm_module.h"
38 #include <libintl.h>
39 #include <libdllink.h>
40 #include <libdlib.h>
41 #include <libdlpi.h>
42
43 /*
44 * Definitions
45 */
46 #ifndef lint
47 #define _(x) gettext(x)
48 #else
49 #define _(x) x
50 #endif
51
52 /* Some generic well-knowns and defaults used in this module */
53 #define RCM_LINK_PREFIX "SUNW_datalink" /* RCM datalink name prefix */
54 #define RCM_LINK_RESOURCE_MAX (13 + LINKID_STR_WIDTH)
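/*
 * Resource names handled by this module have the form
 * "SUNW_datalink/<linkid>" (e.g. "SUNW_datalink/5"); RCM_LINK_RESOURCE_MAX
 * sizes the buffers used to build them with snprintf() below.
 */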
55
56 /* IBPART link flags */
57 typedef enum {
58 IBPART_OFFLINED = 0x1,
59 IBPART_CONSUMER_OFFLINED = 0x2,
60 IBPART_STALE = 0x4
61 } ibpart_flag_t;
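/*
 * IBPART_OFFLINED marks a partition that this module has removed from the
 * active configuration, IBPART_CONSUMER_OFFLINED marks one whose consumers
 * (IP interfaces) have been offlined, and IBPART_STALE marks one that was
 * not seen during the most recent cache update.
 */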
62
63 /* link representation */
64 typedef struct dl_ibpart {
65 struct dl_ibpart *dlib_next; /* next IBPART on this link */
66 struct dl_ibpart *dlib_prev; /* prev IBPART on this link */
67 datalink_id_t dlib_ibpart_id;
68 ibpart_flag_t dlib_flags; /* IBPART link flags */
69 } dl_ibpart_t;
70
71 /* IBPART Cache state flags */
72 typedef enum {
73 CACHE_NODE_STALE = 0x1, /* stale cached data */
74 CACHE_NODE_NEW = 0x2, /* new cached nodes */
75 CACHE_NODE_OFFLINED = 0x4 /* nodes offlined */
76 } cache_node_state_t;
77
78 /* Network Cache lookup options */
79 #define CACHE_NO_REFRESH 0x1 /* cache refresh not needed */
80 #define CACHE_REFRESH 0x2 /* refresh cache */
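/*
 * With CACHE_REFRESH, cache_lookup() first re-syncs the cache with the
 * system via cache_update(); with CACHE_NO_REFRESH it searches the cache
 * as-is.
 */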
81
82 /* Cache element */
83 typedef struct link_cache {
84 struct link_cache *pc_next; /* next cached resource */
85 struct link_cache *pc_prev; /* prev cached resource */
86 char *pc_resource; /* resource name */
87 datalink_id_t pc_linkid; /* linkid */
88 dl_ibpart_t *pc_ibpart; /* IBPART list on this link */
89 cache_node_state_t pc_state; /* cache state flags */
90 } link_cache_t;
91
92 /*
93 * Global cache for network IBPARTs
94 */
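/*
 * cache_head and cache_tail are sentinel nodes: the cache is empty when
 * cache_head.pc_next points at cache_tail.  Every walk below starts at
 * cache_head.pc_next and stops at &cache_tail.
 */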
95 static link_cache_t cache_head;
96 static link_cache_t cache_tail;
97 static mutex_t cache_lock;
98 static int events_registered = 0;
99
100 static dladm_handle_t dld_handle = NULL;
101
102 /*
103 * RCM module interface prototypes
104 */
105 static int ibpart_register(rcm_handle_t *);
106 static int ibpart_unregister(rcm_handle_t *);
107 static int ibpart_get_info(rcm_handle_t *, char *, id_t, uint_t,
108 char **, char **, nvlist_t *, rcm_info_t **);
109 static int ibpart_suspend(rcm_handle_t *, char *, id_t,
110 timespec_t *, uint_t, char **, rcm_info_t **);
111 static int ibpart_resume(rcm_handle_t *, char *, id_t, uint_t,
112 char **, rcm_info_t **);
113 static int ibpart_offline(rcm_handle_t *, char *, id_t, uint_t,
114 char **, rcm_info_t **);
115 static int ibpart_undo_offline(rcm_handle_t *, char *, id_t,
116 uint_t, char **, rcm_info_t **);
117 static int ibpart_remove(rcm_handle_t *, char *, id_t, uint_t,
118 char **, rcm_info_t **);
119 static int ibpart_notify_event(rcm_handle_t *, char *, id_t,
120 uint_t, char **, nvlist_t *, rcm_info_t **);
121 static int ibpart_configure(rcm_handle_t *, datalink_id_t);
122
123 /* Module private routines */
124 static void cache_free();
125 static int cache_update(rcm_handle_t *);
126 static void cache_remove(link_cache_t *);
127 static void node_free(link_cache_t *);
128 static void cache_insert(link_cache_t *);
129 static link_cache_t *cache_lookup(rcm_handle_t *, char *, char);
130 static int ibpart_consumer_offline(rcm_handle_t *, link_cache_t *,
131 char **, uint_t, rcm_info_t **);
132 static void ibpart_consumer_online(rcm_handle_t *, link_cache_t *,
133 char **, uint_t, rcm_info_t **);
134 static int ibpart_offline_ibpart(link_cache_t *, uint32_t,
135 cache_node_state_t);
136 static void ibpart_online_ibpart(link_cache_t *);
137 static char *ibpart_usage(link_cache_t *);
138 static void ibpart_log_err(datalink_id_t, char **, char *);
139 static int ibpart_consumer_notify(rcm_handle_t *, datalink_id_t,
140 char **, uint_t, rcm_info_t **);
141
142 /* Module-Private data */
143 static struct rcm_mod_ops ibpart_ops =
144 {
145 RCM_MOD_OPS_VERSION,
146 ibpart_register,
147 ibpart_unregister,
148 ibpart_get_info,
149 ibpart_suspend,
150 ibpart_resume,
151 ibpart_offline,
152 ibpart_undo_offline,
153 ibpart_remove,
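/* the two NULL slots are the optional capacity-change callbacks */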
154 NULL,
155 NULL,
156 ibpart_notify_event
157 };
158
159 /*
160 * rcm_mod_init() - Initialize the module and return the ops structure.
161 */
162 struct rcm_mod_ops *
163 rcm_mod_init(void)
164 {
165 char errmsg[DLADM_STRSIZE];
166 dladm_status_t status;
167
168 rcm_log_message(RCM_TRACE1, "IBPART: mod_init\n");
169
170 cache_head.pc_next = &cache_tail;
171 cache_head.pc_prev = NULL;
172 cache_tail.pc_prev = &cache_head;
173 cache_tail.pc_next = NULL;
174 (void) mutex_init(&cache_lock, 0, NULL);
175
176 if ((status = dladm_open(&dld_handle)) != DLADM_STATUS_OK) {
177 rcm_log_message(RCM_WARNING,
178 "IBPART: mod_init failed: cannot open datalink "
179 "handle: %s\n", dladm_status2str(status, errmsg));
180 return (NULL);
181 }
182
183 /* Return the ops vectors */
184 return (&ibpart_ops);
185 }
186
187 /*
188 * rcm_mod_info() - Return a string describing this module.
189 */
190 const char *
191 rcm_mod_info(void)
192 {
193 rcm_log_message(RCM_TRACE1, "IBPART: mod_info\n");
194
195 return ("IBPART module");
196 }
197
198 /*
199 * rcm_mod_fini() - Destroy the network IBPART cache.
200 */
201 int
202 rcm_mod_fini(void)
203 {
204 rcm_log_message(RCM_TRACE1, "IBPART: mod_fini\n");
205
206 /*
207 * Note that ibpart_unregister() does not seem to be called anywhere,
208 * therefore we free the cache nodes here. In theory we should call
209 * rcm_unregister_interest() for each node before we free it, but the
210 * framework does not provide the rcm_handle to allow us to do so.
211 */
212 cache_free();
213 (void) mutex_destroy(&cache_lock);
214
215 dladm_close(dld_handle);
216 return (RCM_SUCCESS);
217 }
218
219 /*
220 * ibpart_register() - Make sure the cache is properly sync'ed, and its
221 * registrations are in order.
222 */
223 static int
224 ibpart_register(rcm_handle_t *hd)
225 {
226 rcm_log_message(RCM_TRACE1, "IBPART: register\n");
227
228 if (cache_update(hd) < 0)
229 return (RCM_FAILURE);
230
231 /*
232 * Need to register interest in all new resources
233 * getting attached, so we get attach event notifications
234 */
235 if (!events_registered) {
236 if (rcm_register_event(hd, RCM_RESOURCE_LINK_NEW, 0, NULL)
237 != RCM_SUCCESS) {
238 rcm_log_message(RCM_ERROR,
239 _("IBPART: failed to register %s\n"),
240 RCM_RESOURCE_LINK_NEW);
241 return (RCM_FAILURE);
242 } else {
243 rcm_log_message(RCM_DEBUG, "IBPART: registered %s\n",
244 RCM_RESOURCE_LINK_NEW);
245 events_registered++;
246 }
247 }
248
249 return (RCM_SUCCESS);
250 }
251
252 /*
253 * ibpart_unregister() - Walk the cache, unregistering all the links.
254 */
255 static int
256 ibpart_unregister(rcm_handle_t *hd)
257 {
258 link_cache_t *node;
259
260 rcm_log_message(RCM_TRACE1, "IBPART: unregister\n");
261
262 /* Walk the cache, unregistering everything */
263 (void) mutex_lock(&cache_lock);
264 node = cache_head.pc_next;
265 while (node != &cache_tail) {
266 if (rcm_unregister_interest(hd, node->pc_resource, 0)
267 != RCM_SUCCESS) {
268 rcm_log_message(RCM_ERROR,
269 _("IBPART: failed to unregister %s\n"),
270 node->pc_resource);
271 (void) mutex_unlock(&cache_lock);
272 return (RCM_FAILURE);
273 }
274 cache_remove(node);
275 node_free(node);
276 node = cache_head.pc_next;
277 }
278 (void) mutex_unlock(&cache_lock);
279
280 /*
281 * Unregister interest in all new resources
282 */
283 if (events_registered) {
284 if (rcm_unregister_event(hd, RCM_RESOURCE_LINK_NEW, 0)
285 != RCM_SUCCESS) {
286 rcm_log_message(RCM_ERROR,
287 _("IBPART: failed to unregister %s\n"),
288 RCM_RESOURCE_LINK_NEW);
289 return (RCM_FAILURE);
290 } else {
291 rcm_log_message(RCM_DEBUG, "IBPART: unregistered %s\n",
292 RCM_RESOURCE_LINK_NEW);
293 events_registered--;
294 }
295 }
296
297 return (RCM_SUCCESS);
298 }
299
300 /*
301 * ibpart_offline() - Offline IBPARTs on a specific node.
302 */
303 static int
304 ibpart_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
305 char **errorp, rcm_info_t **info)
306 {
307 link_cache_t *node;
308
309 rcm_log_message(RCM_TRACE1, "IBPART: offline(%s)\n", rsrc);
310
311 /* Lock the cache and lookup the resource */
312 (void) mutex_lock(&cache_lock);
313 node = cache_lookup(hd, rsrc, CACHE_REFRESH);
314 if (node == NULL) {
315 /* should not happen because the resource is registered. */
316 ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
317 "unrecognized resource");
318 (void) mutex_unlock(&cache_lock);
319 return (RCM_SUCCESS);
320 }
321
322 /*
323 * Inform consumers (IP interfaces) of associated IBPARTs to be offlined
324 */
325 if (ibpart_consumer_offline(hd, node, errorp, flags, info) ==
326 RCM_SUCCESS) {
327 rcm_log_message(RCM_DEBUG,
328 "IBPART: consumers agreed on offline\n");
329 } else {
330 ibpart_log_err(node->pc_linkid, errorp,
331 "consumers failed to offline");
332 (void) mutex_unlock(&cache_lock);
333 return (RCM_FAILURE);
334 }
335
336 /* Check if it's a query */
337 if (flags & RCM_QUERY) {
338 rcm_log_message(RCM_TRACE1,
339 "IBPART: offline query succeeded(%s)\n", rsrc);
340 (void) mutex_unlock(&cache_lock);
341 return (RCM_SUCCESS);
342 }
343
344 if (ibpart_offline_ibpart(node, IBPART_OFFLINED, CACHE_NODE_OFFLINED) !=
345 RCM_SUCCESS) {
346 ibpart_online_ibpart(node);
347 ibpart_log_err(node->pc_linkid, errorp, "offline failed");
348 (void) mutex_unlock(&cache_lock);
349 return (RCM_FAILURE);
350 }
351
352 rcm_log_message(RCM_TRACE1, "IBPART: Offline succeeded(%s)\n", rsrc);
353 (void) mutex_unlock(&cache_lock);
354 return (RCM_SUCCESS);
355 }
356
357 /*
358 * ibpart_undo_offline() - Undo offline of a previously offlined node.
359 */
360 /*ARGSUSED*/
361 static int
362 ibpart_undo_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
363 char **errorp, rcm_info_t **info)
364 {
365 link_cache_t *node;
366
367 rcm_log_message(RCM_TRACE1, "IBPART: online(%s)\n", rsrc);
368
369 (void) mutex_lock(&cache_lock);
370 node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
371 if (node == NULL) {
372 ibpart_log_err(DATALINK_INVALID_LINKID, errorp, "no such link");
373 (void) mutex_unlock(&cache_lock);
374 errno = ENOENT;
375 return (RCM_FAILURE);
376 }
377
378 /* Check if no attempt should be made to online the link here */
379 if (!(node->pc_state & CACHE_NODE_OFFLINED)) {
380 ibpart_log_err(node->pc_linkid, errorp, "link not offlined");
381 (void) mutex_unlock(&cache_lock);
382 errno = ENOTSUP;
383 return (RCM_SUCCESS);
384 }
385
386 ibpart_online_ibpart(node);
387
388 /*
389 * Inform IP interfaces on associated IBPARTs to be onlined
390 */
391 ibpart_consumer_online(hd, node, errorp, flags, info);
392
393 node->pc_state &= ~CACHE_NODE_OFFLINED;
394 rcm_log_message(RCM_TRACE1, "IBPART: online succeeded(%s)\n", rsrc);
395 (void) mutex_unlock(&cache_lock);
396 return (RCM_SUCCESS);
397 }
398
399 static void
400 ibpart_online_ibpart(link_cache_t *node)
401 {
402 dl_ibpart_t *ibpart;
403 dladm_status_t status;
404 char errmsg[DLADM_STRSIZE];
405
406 /*
407 * Try to bring all offlined IBPARTs back online
408 */
409 for (ibpart = node->pc_ibpart; ibpart != NULL;
410 ibpart = ibpart->dlib_next) {
411 if (!(ibpart->dlib_flags & IBPART_OFFLINED))
412 continue;
413
414 rcm_log_message(RCM_TRACE1, "IBPART: online DLID %d\n",
415 ibpart->dlib_ibpart_id);
416 if ((status = dladm_part_up(dld_handle,
417 ibpart->dlib_ibpart_id, 0)) != DLADM_STATUS_OK) {
418 /*
419 * Print a warning message and continue to online
420 * other IBPARTs.
421 */
422 rcm_log_message(RCM_WARNING,
423 _("IBPART: IBPART online failed (%u): %s\n"),
424 ibpart->dlib_ibpart_id,
425 dladm_status2str(status, errmsg));
426 } else {
427 ibpart->dlib_flags &= ~IBPART_OFFLINED;
428 }
429 }
430 }
431
432 static int
433 ibpart_offline_ibpart(link_cache_t *node, uint32_t flags,
434 cache_node_state_t state)
435 {
436 dl_ibpart_t *ibpart;
437 dladm_status_t status;
438 char errmsg[DLADM_STRSIZE];
439
440 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_offline_ibpart "
441 "(%s %u %u)\n", node->pc_resource, flags, state);
442
443 /*
444 * Try to delete all explicitly created IBPARTs
445 */
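/*
 * DLADM_OPT_ACTIVE removes each partition from the active configuration
 * only, so ibpart_online_ibpart() can later re-create it with
 * dladm_part_up().
 */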
446 for (ibpart = node->pc_ibpart; ibpart != NULL;
447 ibpart = ibpart->dlib_next) {
448 rcm_log_message(RCM_TRACE1, "IBPART: offline DLID %d\n",
449 ibpart->dlib_ibpart_id);
450 if ((status = dladm_part_delete(dld_handle,
451 ibpart->dlib_ibpart_id, DLADM_OPT_ACTIVE)) !=
452 DLADM_STATUS_OK) {
453 rcm_log_message(RCM_WARNING,
454 _("IBPART: IBPART offline failed (%u): %s\n"),
455 ibpart->dlib_ibpart_id,
456 dladm_status2str(status, errmsg));
457 return (RCM_FAILURE);
458 } else {
459 rcm_log_message(RCM_TRACE1,
460 "IBPART: IBPART offline succeeded(%u)\n",
461 ibpart->dlib_ibpart_id);
462 ibpart->dlib_flags |= flags;
463 }
464 }
465
466 node->pc_state |= state;
467 return (RCM_SUCCESS);
468 }
469
470 /*
471 * ibpart_get_info() - Gather usage information for this resource.
472 */
473 /*ARGSUSED*/
474 int
475 ibpart_get_info(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
476 char **usagep, char **errorp, nvlist_t *props, rcm_info_t **info)
477 {
478 link_cache_t *node;
479
480 rcm_log_message(RCM_TRACE1, "IBPART: get_info(%s)\n", rsrc);
481
482 (void) mutex_lock(&cache_lock);
483 node = cache_lookup(hd, rsrc, CACHE_REFRESH);
484 if (node == NULL) {
485 rcm_log_message(RCM_INFO,
486 _("IBPART: get_info(%s) unrecognized resource\n"), rsrc);
487 (void) mutex_unlock(&cache_lock);
488 errno = ENOENT;
489 return (RCM_FAILURE);
490 }
491
492 *usagep = ibpart_usage(node);
493 (void) mutex_unlock(&cache_lock);
494 if (*usagep == NULL) {
495 /* most likely malloc failure */
496 rcm_log_message(RCM_ERROR,
497 _("IBPART: get_info(%s) malloc failure\n"), rsrc);
499 errno = ENOMEM;
500 return (RCM_FAILURE);
501 }
502
503 /* Set client/role properties */
504 (void) nvlist_add_string(props, RCM_CLIENT_NAME, "IBPART");
505
506 rcm_log_message(RCM_TRACE1, "IBPART: get_info(%s) info = %s\n",
507 rsrc, *usagep);
508 return (RCM_SUCCESS);
509 }
510
511 /*
512 * ibpart_suspend() - Nothing to do, always okay
513 */
514 /*ARGSUSED*/
515 static int
516 ibpart_suspend(rcm_handle_t *hd, char *rsrc, id_t id, timespec_t *interval,
517 uint_t flags, char **errorp, rcm_info_t **info)
518 {
519 rcm_log_message(RCM_TRACE1, "IBPART: suspend(%s)\n", rsrc);
520 return (RCM_SUCCESS);
521 }
522
523 /*
524 * ibpart_resume() - Nothing to do, always okay
525 */
526 /*ARGSUSED*/
527 static int
528 ibpart_resume(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
529 char **errorp, rcm_info_t **info)
530 {
531 rcm_log_message(RCM_TRACE1, "IBPART: resume(%s)\n", rsrc);
532 return (RCM_SUCCESS);
533 }
534
535 /*
536 * ibpart_consumer_remove()
537 *
538 * Notify IBPART consumers to remove cache.
539 */
540 static int
541 ibpart_consumer_remove(rcm_handle_t *hd, link_cache_t *node, uint_t flags,
542 rcm_info_t **info)
543 {
544 dl_ibpart_t *ibpart = NULL;
545 char rsrc[RCM_LINK_RESOURCE_MAX];
546 int ret = RCM_SUCCESS;
547
548 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_remove (%s)\n",
549 node->pc_resource);
550
551 for (ibpart = node->pc_ibpart; ibpart != NULL;
552 ibpart = ibpart->dlib_next) {
553
554 /*
555 * This will only be called when the offline operation
556 * succeeds, so the IBPART consumers must have been offlined
557 * at this point.
558 */
559 assert(ibpart->dlib_flags & IBPART_CONSUMER_OFFLINED);
560
561 (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
562 RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);
563
564 ret = rcm_notify_remove(hd, rsrc, flags, info);
565 if (ret != RCM_SUCCESS) {
566 rcm_log_message(RCM_WARNING,
567 _("IBPART: notify remove failed (%s)\n"), rsrc);
568 break;
569 }
570 }
571
572 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_remove done\n");
573 return (ret);
574 }
575
576 /*
577 * ibpart_remove() - remove a resource from cache
578 */
579 /*ARGSUSED*/
580 static int
581 ibpart_remove(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
582 char **errorp, rcm_info_t **info)
583 {
584 link_cache_t *node;
585 int rv;
586
587 rcm_log_message(RCM_TRACE1, "IBPART: remove(%s)\n", rsrc);
588
589 (void) mutex_lock(&cache_lock);
590 node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
591 if (node == NULL) {
592 rcm_log_message(RCM_INFO,
593 _("IBPART: remove(%s) unrecognized resource\n"), rsrc);
594 (void) mutex_unlock(&cache_lock);
595 errno = ENOENT;
596 return (RCM_FAILURE);
597 }
598
599 /* remove the cached entry for the resource */
600 cache_remove(node);
601 (void) mutex_unlock(&cache_lock);
602
603 rv = ibpart_consumer_remove(hd, node, flags, info);
604 node_free(node);
605 return (rv);
606 }
607
608 /*
609 * ibpart_notify_event - Project private implementation to receive new resource
610 * events. It intercepts all new resource events. If the
611 * new resource is a network resource, pass up a notify
612 * for it too. The new resource need not be cached here, since
613 * caching is redone when the module registers again.
614 */
615 /*ARGSUSED*/
616 static int
617 ibpart_notify_event(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
618 char **errorp, nvlist_t *nvl, rcm_info_t **info)
619 {
620 nvpair_t *nvp = NULL;
621 datalink_id_t linkid;
622 uint64_t id64;
623 int rv = RCM_SUCCESS;
624
625 rcm_log_message(RCM_TRACE1, "IBPART: notify_event(%s)\n", rsrc);
626
627 if (strcmp(rsrc, RCM_RESOURCE_LINK_NEW) != 0) {
628 ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
629 "unrecognized event");
630 errno = EINVAL;
631 return (RCM_FAILURE);
632 }
633
634 /* Update cache to reflect latest IBPARTs */
635 if (cache_update(hd) < 0) {
636 ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
637 "private Cache update failed");
638 return (RCM_FAILURE);
639 }
640
641 /*
642 * Try our best to recover all of the configuration.
643 */
644 rcm_log_message(RCM_DEBUG, "IBPART: process_nvlist\n");
645 while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
646 if (strcmp(nvpair_name(nvp), RCM_NV_LINKID) != 0)
647 continue;
648
649 if (nvpair_value_uint64(nvp, &id64) != 0) {
650 ibpart_log_err(DATALINK_INVALID_LINKID, errorp,
651 "cannot get linkid");
652 rv = RCM_FAILURE;
653 continue;
654 }
655
656 linkid = (datalink_id_t)id64;
657 if (ibpart_configure(hd, linkid) != 0) {
658 ibpart_log_err(linkid, errorp, "configuring failed");
659 rv = RCM_FAILURE;
660 continue;
661 }
662
663 /* Notify all IBPART consumers */
664 if (ibpart_consumer_notify(hd, linkid, errorp, flags,
665 info) != 0) {
666 ibpart_log_err(linkid, errorp,
667 "consumer notify failed");
668 rv = RCM_FAILURE;
669 }
670 }
671
672 rcm_log_message(RCM_TRACE1,
673 "IBPART: notify_event: link configuration complete\n");
674 return (rv);
675 }
676
677 /*
678 * ibpart_usage - Determine the usage of a link.
679 * The returned buffer is owned by the caller, who must
680 * free it when done.
681 */
682 static char *
683 ibpart_usage(link_cache_t *node)
684 {
685 dl_ibpart_t *ibpart;
686 int nibpart;
687 char *buf;
688 const char *fmt;
689 char *sep;
690 char errmsg[DLADM_STRSIZE];
691 char name[MAXLINKNAMELEN];
692 dladm_status_t status;
693 size_t bufsz;
694
695 rcm_log_message(RCM_TRACE2, "IBPART: usage(%s)\n", node->pc_resource);
696
697 assert(MUTEX_HELD(&cache_lock));
698 if ((status = dladm_datalink_id2info(dld_handle, node->pc_linkid, NULL,
699 NULL, NULL, name, sizeof (name))) != DLADM_STATUS_OK) {
700 rcm_log_message(RCM_ERROR,
701 _("IBPART: usage(%s) get link name failure(%s)\n"),
702 node->pc_resource, dladm_status2str(status, errmsg));
703 return (NULL);
704 }
705
706 if (node->pc_state & CACHE_NODE_OFFLINED)
707 fmt = _("%1$s offlined");
708 else
709 fmt = _("%1$s IBPART: ");
710
711 /* TRANSLATION_NOTE: separator used between IBPART linkids */
712 sep = _(", ");
713
714 nibpart = 0;
715 for (ibpart = node->pc_ibpart; ibpart != NULL;
716 ibpart = ibpart->dlib_next)
717 nibpart++;
718
719 /* space for IBPARTs and separators, plus message */
720 bufsz = nibpart * (MAXLINKNAMELEN + strlen(sep)) +
721 strlen(fmt) + MAXLINKNAMELEN + 1;
722 if ((buf = malloc(bufsz)) == NULL) {
723 rcm_log_message(RCM_ERROR,
724 _("IBPART: usage(%s) malloc failure(%s)\n"),
725 node->pc_resource, strerror(errno));
726 return (NULL);
727 }
728 (void) snprintf(buf, bufsz, fmt, name);
729
730 if (node->pc_state & CACHE_NODE_OFFLINED) {
731 /* Nothing else to do */
732 rcm_log_message(RCM_TRACE2, "IBPART: usage (%s) info = %s\n",
733 node->pc_resource, buf);
734 return (buf);
735 }
736
737 for (ibpart = node->pc_ibpart; ibpart != NULL;
738 ibpart = ibpart->dlib_next) {
739 rcm_log_message(RCM_DEBUG, "IBPART:= %u\n",
740 ibpart->dlib_ibpart_id);
741
742 if ((status = dladm_datalink_id2info(dld_handle,
743 ibpart->dlib_ibpart_id, NULL, NULL, NULL, name,
744 sizeof (name))) != DLADM_STATUS_OK) {
745 rcm_log_message(RCM_ERROR,
746 _("IBPART: usage(%s) get ibpart %u name "
747 "failure(%s)\n"), node->pc_resource,
748 ibpart->dlib_ibpart_id,
749 dladm_status2str(status, errmsg));
750 free(buf);
751 return (NULL);
752 }
753
754 (void) strlcat(buf, name, bufsz);
755 if (ibpart->dlib_next != NULL)
756 (void) strlcat(buf, sep, bufsz);
757 }
758
759 rcm_log_message(RCM_TRACE2, "IBPART: usage (%s) info = %s\n",
760 node->pc_resource, buf);
761
762 return (buf);
763 }
764
765 /*
766 * Cache management routines. All cache management functions must
767 * be called with cache_lock held.
768 */
769
770 /*
771 * cache_lookup() - Get a cache node for a resource.
772 * Call with cache lock held.
773 *
774 * This ensures that the cache is consistent with the system state and
775 * returns a pointer to the cache element corresponding to the resource.
776 */
777 static link_cache_t *
778 cache_lookup(rcm_handle_t *hd, char *rsrc, char options)
779 {
780 link_cache_t *node;
781
782 rcm_log_message(RCM_TRACE2, "IBPART: cache lookup(%s)\n", rsrc);
783
784 assert(MUTEX_HELD(&cache_lock));
785 if (options & CACHE_REFRESH) {
786 /* drop lock since update locks cache again */
787 (void) mutex_unlock(&cache_lock);
788 (void) cache_update(hd);
789 (void) mutex_lock(&cache_lock);
790 }
791
792 node = cache_head.pc_next;
793 for (; node != &cache_tail; node = node->pc_next) {
794 if (strcmp(rsrc, node->pc_resource) == 0) {
795 rcm_log_message(RCM_TRACE2,
796 "IBPART: cache lookup succeeded(%s)\n", rsrc);
797 return (node);
798 }
799 }
800 return (NULL);
801 }
802
803 /*
804 * node_free - Free a node from the cache
805 */
806 static void
807 node_free(link_cache_t *node)
808 {
809 dl_ibpart_t *ibpart, *next;
810
811 if (node != NULL) {
812 free(node->pc_resource);
813
814 /* free the IBPART list */
815 for (ibpart = node->pc_ibpart; ibpart != NULL; ibpart = next) {
816 next = ibpart->dlib_next;
817 free(ibpart);
818 }
819 free(node);
820 }
821 }
822
823 /*
824 * cache_insert - Insert a resource node in cache
825 */
826 static void
827 cache_insert(link_cache_t *node)
828 {
829 assert(MUTEX_HELD(&cache_lock));
830
831 /* insert at the head for best performance */
832 node->pc_next = cache_head.pc_next;
833 node->pc_prev = &cache_head;
834
835 node->pc_next->pc_prev = node;
836 node->pc_prev->pc_next = node;
837 }
838
839 /*
840 * cache_remove() - Remove a resource node from cache.
841 */
842 static void
843 cache_remove(link_cache_t *node)
844 {
845 assert(MUTEX_HELD(&cache_lock));
846 node->pc_next->pc_prev = node->pc_prev;
847 node->pc_prev->pc_next = node->pc_next;
848 node->pc_next = NULL;
849 node->pc_prev = NULL;
850 }
851
852 typedef struct ibpart_update_arg_s {
853 rcm_handle_t *hd;
854 int retval;
855 } ibpart_update_arg_t;
856
857 /*
858 * ibpart_update() - Update the cache entry for one IBPART link
859 */
860 static int
861 ibpart_update(dladm_handle_t handle, datalink_id_t ibpartid, void *arg)
862 {
863 ibpart_update_arg_t *ibpart_update_argp = arg;
864 rcm_handle_t *hd = ibpart_update_argp->hd;
865 link_cache_t *node;
866 dl_ibpart_t *ibpart;
867 char *rsrc;
868 dladm_ib_attr_t ibpart_attr;
869 dladm_status_t status;
870 char errmsg[DLADM_STRSIZE];
871 boolean_t newnode = B_FALSE;
872 int ret = -1;
873
874 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_update(%u)\n", ibpartid);
875
876 assert(MUTEX_HELD(&cache_lock));
877 status = dladm_part_info(handle, ibpartid, &ibpart_attr,
878 DLADM_OPT_ACTIVE);
879 if (status != DLADM_STATUS_OK) {
880 rcm_log_message(RCM_TRACE1,
881 "IBPART: ibpart_update() cannot get ibpart information for "
882 "%u(%s)\n", ibpartid, dladm_status2str(status, errmsg));
883 return (DLADM_WALK_CONTINUE);
884 }
885
886 if (ibpart_attr.dia_physlinkid == DATALINK_INVALID_LINKID) {
887 /*
888 * Skip the IB port nodes.
889 */
890 rcm_log_message(RCM_TRACE1,
891 "IBPART: ibpart_update(): skip the PORT nodes %u\n",
892 ibpartid);
893 return (DLADM_WALK_CONTINUE);
894 }
895
896 rsrc = malloc(RCM_LINK_RESOURCE_MAX);
897 if (rsrc == NULL) {
898 rcm_log_message(RCM_ERROR, _("IBPART: malloc error(%s): %u\n"),
899 strerror(errno), ibpartid);
900 goto done;
901 }
902
903 (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
904 RCM_LINK_PREFIX, ibpart_attr.dia_physlinkid);
905
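/*
 * The cache is keyed by the underlying physical link; each cache node
 * carries the list of partitions created over that link.
 */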
906 node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH);
907 if (node != NULL) {
908 rcm_log_message(RCM_DEBUG,
909 "IBPART: %s already registered (ibpartid:%d)\n",
910 rsrc, ibpart_attr.dia_partlinkid);
911 free(rsrc);
912 } else {
913 rcm_log_message(RCM_DEBUG,
914 "IBPART: %s is a new resource (ibpartid:%d)\n",
915 rsrc, ibpart_attr.dia_partlinkid);
916 if ((node = calloc(1, sizeof (link_cache_t))) == NULL) {
917 free(rsrc);
918 rcm_log_message(RCM_ERROR, _("IBPART: calloc: %s\n"),
919 strerror(errno));
920 goto done;
921 }
922
923 node->pc_resource = rsrc;
924 node->pc_ibpart = NULL;
925 node->pc_linkid = ibpart_attr.dia_physlinkid;
926 node->pc_state |= CACHE_NODE_NEW;
927 newnode = B_TRUE;
928 }
929
930 for (ibpart = node->pc_ibpart; ibpart != NULL;
931 ibpart = ibpart->dlib_next) {
932 if (ibpart->dlib_ibpart_id == ibpartid) {
933 ibpart->dlib_flags &= ~IBPART_STALE;
934 break;
935 }
936 }
937
938 if (ibpart == NULL) {
939 if ((ibpart = calloc(1, sizeof (dl_ibpart_t))) == NULL) {
940 rcm_log_message(RCM_ERROR, _("IBPART: malloc: %s\n"),
941 strerror(errno));
942 if (newnode) {
943 free(rsrc);
944 free(node);
945 }
946 goto done;
947 }
948 ibpart->dlib_ibpart_id = ibpartid;
949 ibpart->dlib_next = node->pc_ibpart;
950 ibpart->dlib_prev = NULL;
951 if (node->pc_ibpart != NULL)
952 node->pc_ibpart->dlib_prev = ibpart;
953 node->pc_ibpart = ibpart;
954 }
955
956 node->pc_state &= ~CACHE_NODE_STALE;
957
958 if (newnode)
959 cache_insert(node);
960
961 rcm_log_message(RCM_TRACE3, "IBPART: ibpart_update: succeeded(%u)\n",
962 ibpartid);
963 ret = 0;
964 done:
965 ibpart_update_argp->retval = ret;
966 return (ret == 0 ? DLADM_WALK_CONTINUE : DLADM_WALK_TERMINATE);
967 }
968
969 /*
970 * ibpart_update_all() - Walk all IBPART links in the system, updating the cache
971 */
972 static int
973 ibpart_update_all(rcm_handle_t *hd)
974 {
975 ibpart_update_arg_t arg = {NULL, 0};
976
977 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_update_all\n");
978
979 assert(MUTEX_HELD(&cache_lock));
980 arg.hd = hd;
981 (void) dladm_walk_datalink_id(ibpart_update, dld_handle, &arg,
982 DATALINK_CLASS_PART, DATALINK_ANY_MEDIATYPE, DLADM_OPT_ACTIVE);
983 return (arg.retval);
984 }
985
986 /*
987 * cache_update() - Update cache with latest interface info
988 */
989 static int
990 cache_update(rcm_handle_t *hd)
991 {
992 link_cache_t *node, *nnode;
993 dl_ibpart_t *ibpart;
994 int rv;
995
996 rcm_log_message(RCM_TRACE2, "IBPART: cache_update\n");
997
998 (void) mutex_lock(&cache_lock);
999
1000 /* first we walk the entire cache, marking each entry stale */
1001 node = cache_head.pc_next;
1002 for (; node != &cache_tail; node = node->pc_next) {
1003 node->pc_state |= CACHE_NODE_STALE;
1004 for (ibpart = node->pc_ibpart; ibpart != NULL;
1005 ibpart = ibpart->dlib_next)
1006 ibpart->dlib_flags |= IBPART_STALE;
1007 }
1008
1009 rv = ibpart_update_all(hd);
1010
1011 /*
1012 * Continue to delete all stale nodes from the cache even if
1013 * ibpart_update_all() failed. Unregister links that are not offlined
1014 * and are still in the cache.
1015 */
1016 for (node = cache_head.pc_next; node != &cache_tail; node = nnode) {
1017 dl_ibpart_t *ibpart, *next;
1018
1019 for (ibpart = node->pc_ibpart; ibpart != NULL; ibpart = next) {
1020 next = ibpart->dlib_next;
1021
1022 /* clear stale IBPARTs */
1023 if (ibpart->dlib_flags & IBPART_STALE) {
1024 if (ibpart->dlib_prev != NULL)
1025 ibpart->dlib_prev->dlib_next = next;
1026 else
1027 node->pc_ibpart = next;
1028
1029 if (next != NULL)
1030 next->dlib_prev = ibpart->dlib_prev;
1031 free(ibpart);
1032 }
1033 }
1034
1035 nnode = node->pc_next;
1036 if (node->pc_state & CACHE_NODE_STALE) {
1037 (void) rcm_unregister_interest(hd, node->pc_resource,
1038 0);
1039 rcm_log_message(RCM_DEBUG, "IBPART: unregistered %s\n",
1040 node->pc_resource);
1041 assert(node->pc_ibpart == NULL);
1042 cache_remove(node);
1043 node_free(node);
1044 continue;
1045 }
1046
1047 if (!(node->pc_state & CACHE_NODE_NEW))
1048 continue;
1049
1050 if (rcm_register_interest(hd, node->pc_resource, 0, NULL) !=
1051 RCM_SUCCESS) {
1052 rcm_log_message(RCM_ERROR,
1053 _("IBPART: failed to register %s\n"),
1054 node->pc_resource);
1055 rv = -1;
1056 } else {
1057 rcm_log_message(RCM_DEBUG, "IBPART: registered %s\n",
1058 node->pc_resource);
1059 node->pc_state &= ~CACHE_NODE_NEW;
1060 }
1061 }
1062
1063 (void) mutex_unlock(&cache_lock);
1064 return (rv);
1065 }
1066
1067 /*
1068 * cache_free() - Empty the cache
1069 */
1070 static void
1071 cache_free()
1072 {
1073 link_cache_t *node;
1074
1075 rcm_log_message(RCM_TRACE2, "IBPART: cache_free\n");
1076
1077 (void) mutex_lock(&cache_lock);
1078 node = cache_head.pc_next;
1079 while (node != &cache_tail) {
1080 cache_remove(node);
1081 node_free(node);
1082 node = cache_head.pc_next;
1083 }
1084 (void) mutex_unlock(&cache_lock);
1085 }
1086
1087 /*
1088 * ibpart_log_err() - RCM error log wrapper
1089 */
1090 static void
1091 ibpart_log_err(datalink_id_t linkid, char **errorp, char *errmsg)
1092 {
1093 char link[MAXLINKNAMELEN];
1094 char errstr[DLADM_STRSIZE];
1095 dladm_status_t status;
1096 int len;
1097 const char *errfmt;
1098 char *error;
1099
1100 link[0] = '\0';
1101 if (linkid != DATALINK_INVALID_LINKID) {
1102 char rsrc[RCM_LINK_RESOURCE_MAX];
1103
1104 (void) snprintf(rsrc, sizeof (rsrc), "%s/%u",
1105 RCM_LINK_PREFIX, linkid);
1106
1107 rcm_log_message(RCM_ERROR, _("IBPART: %s(%s)\n"), errmsg, rsrc);
1108 if ((status = dladm_datalink_id2info(dld_handle, linkid, NULL,
1109 NULL, NULL, link, sizeof (link))) != DLADM_STATUS_OK) {
1110 rcm_log_message(RCM_WARNING,
1111 _("IBPART: cannot get link name for (%s) %s\n"),
1112 rsrc, dladm_status2str(status, errstr));
1113 }
1114 } else {
1115 rcm_log_message(RCM_ERROR, _("IBPART: %s\n"), errmsg);
1116 }
1117
1118 errfmt = strlen(link) > 0 ? _("IBPART: %s(%s)") : _("IBPART: %s");
1119 len = strlen(errfmt) + strlen(errmsg) + MAXLINKNAMELEN + 1;
1120 if ((error = malloc(len)) != NULL) {
1121 if (strlen(link) > 0)
1122 (void) snprintf(error, len, errfmt, errmsg, link);
1123 else
1124 (void) snprintf(error, len, errfmt, errmsg);
1125 }
1126
1127 if (errorp != NULL)
1128 *errorp = error;
1129 }
1130
1131 /*
1132 * ibpart_consumer_online()
1133 *
1134 * Notify online to IBPART consumers.
1135 */
1136 /* ARGSUSED */
1137 static void
1138 ibpart_consumer_online(rcm_handle_t *hd, link_cache_t *node, char **errorp,
1139 uint_t flags, rcm_info_t **info)
1140 {
1141 dl_ibpart_t *ibpart;
1142 char rsrc[RCM_LINK_RESOURCE_MAX];
1143
1144 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_online (%s)\n",
1145 node->pc_resource);
1146
1147 for (ibpart = node->pc_ibpart; ibpart != NULL;
1148 ibpart = ibpart->dlib_next) {
1149 if (!(ibpart->dlib_flags & IBPART_CONSUMER_OFFLINED))
1150 continue;
1151
1152 (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
1153 RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);
1154
1155 if (rcm_notify_online(hd, rsrc, flags, info) == RCM_SUCCESS)
1156 ibpart->dlib_flags &= ~IBPART_CONSUMER_OFFLINED;
1157 }
1158
1159 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_online done\n");
1160 }
1161
1162 /*
1163 * ibpart_consumer_offline()
1164 *
1165 * Offline IBPART consumers.
1166 */
1167 static int
1168 ibpart_consumer_offline(rcm_handle_t *hd, link_cache_t *node, char **errorp,
1169 uint_t flags, rcm_info_t **info)
1170 {
1171 dl_ibpart_t *ibpart;
1172 char rsrc[RCM_LINK_RESOURCE_MAX];
1173 int ret = RCM_SUCCESS;
1174
1175 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_offline (%s)\n",
1176 node->pc_resource);
1177
1178 for (ibpart = node->pc_ibpart; ibpart != NULL;
1179 ibpart = ibpart->dlib_next) {
1180 (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u",
1181 RCM_LINK_PREFIX, ibpart->dlib_ibpart_id);
1182
1183 ret = rcm_request_offline(hd, rsrc, flags, info);
1184 if (ret != RCM_SUCCESS)
1185 break;
1186
1187 ibpart->dlib_flags |= IBPART_CONSUMER_OFFLINED;
1188 }
1189
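/*
 * A non-NULL ibpart here means the walk above stopped early because a
 * consumer rejected the offline request; roll back by onlining the
 * consumers that were already offlined.
 */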
1190 if (ibpart != NULL)
1191 ibpart_consumer_online(hd, node, errorp, flags, info);
1192
1193 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_offline done\n");
1194 return (ret);
1195 }
1196
1197 /*
1198 * Send RCM_RESOURCE_LINK_NEW events to other modules about new IBPARTs.
1199 * Return 0 on success, -1 on failure.
1200 */
1201 static int
1202 ibpart_notify_new_ibpart(rcm_handle_t *hd, char *rsrc)
1203 {
1204 link_cache_t *node;
1205 dl_ibpart_t *ibpart;
1206 nvlist_t *nvl = NULL;
1207 uint64_t id;
1208 int ret = -1;
1209
1210 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_notify_new_ibpart (%s)\n",
1211 rsrc);
1212
1213 (void) mutex_lock(&cache_lock);
1214 if ((node = cache_lookup(hd, rsrc, CACHE_REFRESH)) == NULL) {
1215 (void) mutex_unlock(&cache_lock);
1216 return (0);
1217 }
1218
1219 if (nvlist_alloc(&nvl, 0, 0) != 0) {
1220 (void) mutex_unlock(&cache_lock);
1221 rcm_log_message(RCM_WARNING,
1222 _("IBPART: failed to allocate nvlist\n"));
1223 goto done;
1224 }
1225
1226 for (ibpart = node->pc_ibpart; ibpart != NULL;
1227 ibpart = ibpart->dlib_next) {
1228 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_notify_new_ibpart "
1229 "add (%u)\n", ibpart->dlib_ibpart_id);
1230
1231 id = ibpart->dlib_ibpart_id;
1232 if (nvlist_add_uint64(nvl, RCM_NV_LINKID, id) != 0) {
1233 rcm_log_message(RCM_ERROR,
1234 _("IBPART: failed to construct nvlist\n"));
1235 (void) mutex_unlock(&cache_lock);
1236 goto done;
1237 }
1238 }
1239 (void) mutex_unlock(&cache_lock);
1240
1241 if (rcm_notify_event(hd, RCM_RESOURCE_LINK_NEW, 0, nvl, NULL) !=
1242 RCM_SUCCESS) {
1243 rcm_log_message(RCM_ERROR,
1244 _("IBPART: failed to notify %s event for %s\n"),
1245 RCM_RESOURCE_LINK_NEW, node->pc_resource);
1246 goto done;
1247 }
1248
1249 ret = 0;
1250 done:
1251 if (nvl != NULL)
1252 nvlist_free(nvl);
1253 return (ret);
1254 }
1255
1256 /*
1257 * ibpart_consumer_notify() - Notify consumers of IBPARTs coming back online.
1258 */
1259 static int
1260 ibpart_consumer_notify(rcm_handle_t *hd, datalink_id_t linkid, char **errorp,
1261 uint_t flags, rcm_info_t **info)
1262 {
1263 char rsrc[RCM_LINK_RESOURCE_MAX];
1264 link_cache_t *node;
1265
1266 /* Check for the interface in the cache */
1267 (void) snprintf(rsrc, RCM_LINK_RESOURCE_MAX, "%s/%u", RCM_LINK_PREFIX,
1268 linkid);
1269
1270 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_notify(%s)\n",
1271 rsrc);
1272
1273 /*
1274 * Inform IP consumers of the new link.
1275 */
1276 if (ibpart_notify_new_ibpart(hd, rsrc) != 0) {
1277 (void) mutex_lock(&cache_lock);
1278 if ((node = cache_lookup(hd, rsrc, CACHE_NO_REFRESH)) != NULL) {
1279 (void) ibpart_offline_ibpart(node, IBPART_STALE,
1280 CACHE_NODE_STALE);
1281 }
1282 (void) mutex_unlock(&cache_lock);
1283 rcm_log_message(RCM_TRACE2,
1284 "IBPART: ibpart_notify_new_ibpart failed(%s)\n", rsrc);
1285 return (-1);
1286 }
1287
1288 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_consumer_notify "
1289 "succeeded\n");
1290 return (0);
1291 }
1292
1293 typedef struct ibpart_up_arg_s {
1294 datalink_id_t linkid;
1295 int retval;
1296 } ibpart_up_arg_t;
1297
1298 static int
1299 ibpart_up(dladm_handle_t handle, datalink_id_t ibpartid, void *arg)
1300 {
1301 ibpart_up_arg_t *ibpart_up_argp = arg;
1302 dladm_status_t status;
1303 dladm_ib_attr_t ibpart_attr;
1304 char errmsg[DLADM_STRSIZE];
1305
1306 status = dladm_part_info(handle, ibpartid, &ibpart_attr,
1307 DLADM_OPT_PERSIST);
1308 if (status != DLADM_STATUS_OK) {
1309 rcm_log_message(RCM_TRACE1,
1310 "IBPART: ibpart_up(): cannot get information for IBPART %u "
1311 "(%s)\n", ibpartid, dladm_status2str(status, errmsg));
1312 return (DLADM_WALK_CONTINUE);
1313 }
1314
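/* Only act on partitions created over the physical link of interest */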
1315 if (ibpart_attr.dia_physlinkid != ibpart_up_argp->linkid)
1316 return (DLADM_WALK_CONTINUE);
1317
1318 rcm_log_message(RCM_TRACE3, "IBPART: ibpart_up(%u)\n", ibpartid);
1319 if ((status = dladm_part_up(handle, ibpartid, 0)) == DLADM_STATUS_OK)
1320 return (DLADM_WALK_CONTINUE);
1321
1322 /*
1323 * Print a warning message and continue to bring up the other IBPARTs.
1324 */
1325 rcm_log_message(RCM_WARNING,
1326 _("IBPART: IBPART up failed (%u): %s\n"),
1327 ibpartid, dladm_status2str(status, errmsg));
1328
1329 ibpart_up_argp->retval = -1;
1330 return (DLADM_WALK_CONTINUE);
1331 }
1332
1333 /*
1334 * ibpart_configure() - Configure IBPARTs over a physical link after it attaches
1335 */
1336 static int
1337 ibpart_configure(rcm_handle_t *hd, datalink_id_t linkid)
1338 {
1339 char rsrc[RCM_LINK_RESOURCE_MAX];
1340 link_cache_t *node;
1341 ibpart_up_arg_t arg = {DATALINK_INVALID_LINKID, 0};
1342
1343 /* Check for the IBPARTs in the cache */
1344 (void) snprintf(rsrc, sizeof (rsrc), "%s/%u", RCM_LINK_PREFIX, linkid);
1345
1346 rcm_log_message(RCM_TRACE2, "IBPART: ibpart_configure(%s)\n", rsrc);
1347
1348 /* Check if the link is new or was previously offlined */
1349 (void) mutex_lock(&cache_lock);
1350 if (((node = cache_lookup(hd, rsrc, CACHE_REFRESH)) != NULL) &&
1351 (!(node->pc_state & CACHE_NODE_OFFLINED))) {
1352 rcm_log_message(RCM_TRACE2,
1353 "IBPART: Skipping configured interface(%s)\n", rsrc);
1354 (void) mutex_unlock(&cache_lock);
1355 return (0);
1356 }
1357 (void) mutex_unlock(&cache_lock);
1358
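/*
 * Walk the persistently configured partitions and, in ibpart_up(),
 * bring up those created over this physical link.
 */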
1359 arg.linkid = linkid;
1360 (void) dladm_walk_datalink_id(ibpart_up, dld_handle, &arg,
1361 DATALINK_CLASS_PART, DATALINK_ANY_MEDIATYPE, DLADM_OPT_PERSIST);
1362
1363 if (arg.retval == 0) {
1364 rcm_log_message(RCM_TRACE2,
1365 "IBPART: ibpart_configure succeeded(%s)\n", rsrc);
1366 }
1367 return (arg.retval);
1368 }
1369