/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * I/O support routines for DR
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/kmem.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>

#include <sys/dr.h>
#include <sys/dr_util.h>
#include <sys/drmach.h>

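/*
 * Set the initial DR state of an I/O unit: CONFIGURED if the device is
 * already attached, CONNECTED if it is present but not attached, and
 * EMPTY otherwise.
 */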
void
dr_init_io_unit(dr_io_unit_t *ip)
{
	dr_state_t new_state;

	if (DR_DEV_IS_ATTACHED(&ip->sbi_cm)) {
		new_state = DR_STATE_CONFIGURED;
		ip->sbi_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&ip->sbi_cm)) {
		new_state = DR_STATE_CONNECTED;
		ip->sbi_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
	}
	dr_device_transition(&ip->sbi_cm, new_state);
}

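/*
 * Configure (attach) the device branch for an I/O unit, then perform any
 * drmach post-attach processing.  Any error is recorded on the common unit.
 */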
/*ARGSUSED*/
void
dr_attach_io(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t *err;

	dr_lock_status(hp->h_bd);
	err = drmach_configure(cp->sbdev_id, 0);
	dr_unlock_status(hp->h_bd);

	if (!err)
		err = drmach_io_post_attach(cp->sbdev_id);

	if (err)
		DRERR_SET_C(&cp->sbdev_error, &err);
}

/*
 * remove device nodes for the branch indicated by cp
 */
/*ARGSUSED*/
void
dr_detach_io(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t *err;

	err = drmach_unconfigure(cp->sbdev_id, 0);

	if (!err)
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);

	if (!err)
		err = drmach_io_post_release(cp->sbdev_id);

	if (err) {
		dr_device_transition(cp, DR_STATE_CONFIGURED);
		DRERR_SET_C(&cp->sbdev_error, &err);
	}
}

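/*
 * No additional work is needed to disconnect an I/O unit.
 */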
/*ARGSUSED*/
int
dr_disconnect_io(dr_io_unit_t *ip)
{
	return (0);
}

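/*
 * Announce each I/O device that is about to be configured by the OS.
 */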
/*ARGSUSED*/
int
dr_pre_attach_io(dr_handle_t *hp,
    dr_common_unit_t **devlist, int devnum)
{
	int d;

	for (d = 0; d < devnum; d++) {
		dr_common_unit_t *cp = devlist[d];

		cmn_err(CE_CONT, "OS configure %s", cp->sbdev_path);
	}

	return (0);
}

/*ARGSUSED*/
int
dr_post_attach_io(dr_handle_t *hp,
    dr_common_unit_t **devlist, int devnum)
{
	return (0);
}

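/*
 * Walk the device branch of each I/O unit on the list and total up the
 * outstanding references.  A unit with a non-zero reference count is
 * flagged with an ESBD_BUSY error.  Returns the total reference count
 * across all units.
 */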
static int
dr_check_io_refs(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	register int i, reftotal = 0;
	static fn_t f = "dr_check_io_refs";

	for (i = 0; i < devnum; i++) {
		dr_io_unit_t *ip = (dr_io_unit_t *)devlist[i];
		dev_info_t *dip;
		int ref;
		int refcount_non_gldv3;
		sbd_error_t *err;

		err = drmach_get_dip(ip->sbi_cm.sbdev_id, &dip);
		if (err)
			DRERR_SET_C(&ip->sbi_cm.sbdev_error, &err);
		else if (dip != NULL) {
			ref = 0;
			refcount_non_gldv3 = 0;
			ASSERT(e_ddi_branch_held(dip));
			dr_check_devices(dip, &ref, hp, NULL, NULL,
			    0, &refcount_non_gldv3);
			ASSERT(refcount_non_gldv3 >= 0);
			ASSERT(ref >= refcount_non_gldv3);
			/*
			 * Ignore reference counts of non-gldv3 network
			 * devices, since Crossbow creates reference counts
			 * for non-active (unplumbed) instances.  The
			 * reference count check in detach() is relied on to
			 * prevent such a device from detaching while it is
			 * still in use.
			 */
			ref -= refcount_non_gldv3;
			hp->h_err = NULL;
			if (ref) {
				dr_dev_err(CE_WARN, &ip->sbi_cm, ESBD_BUSY);
			}
			PR_IO("%s: dip(%s) ref = %d\n",
			    f, ddi_get_name(dip), ref);
			reftotal += ref;
		} else {
			PR_IO("%s: NO dip for id (0x%x)\n",
			    f, (uint_t)(uintptr_t)ip->sbi_cm.sbdev_id);
		}
	}

	return (reftotal);
}

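/*
 * Prepare each I/O unit on the list for release: run the drmach
 * pre-release and release steps, then verify that no devices are still
 * referenced.  If any unit remains referenced, the release is undone
 * with drmach_io_unrelease() and -1 is returned.
 */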
int
dr_pre_release_io(dr_handle_t *hp,
    dr_common_unit_t **devlist, int devnum)
{
	static fn_t f = "dr_pre_release_io";
	int d;

	ASSERT(devnum > 0);

	/* fail if any I/O device pre-release fails */
	for (d = 0; d < devnum; d++) {
		dr_io_unit_t *ip = (dr_io_unit_t *)devlist[d];

		if ((hp->h_err = drmach_io_pre_release(
		    ip->sbi_cm.sbdev_id)) != 0) {
			return (-1);
		}
	}

	for (d = 0; d < devnum; d++) {
		dr_io_unit_t *ip = (dr_io_unit_t *)devlist[d];
		sbd_error_t *err;

		err = drmach_release(ip->sbi_cm.sbdev_id);
		if (err) {
			DRERR_SET_C(&ip->sbi_cm.sbdev_error,
			    &err);
			return (-1);
		}
	}

	/* fail if any I/O devices are still referenced */
	if (dr_check_io_refs(hp, devlist, devnum) > 0) {
		PR_IO("%s: failed - I/O devices ref'd\n", f);

		/* recover before returning the error */
		for (d = 0; d < devnum; d++) {
			dr_io_unit_t *ip = (dr_io_unit_t *)devlist[d];
			sbd_error_t *err;
			err = drmach_io_unrelease(ip->sbi_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&ip->sbi_cm.sbdev_error, &err);
				return (-1);
			}
		}
		return (-1);
	}
	return (0);
}

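/*
 * Announce each I/O device that is about to be unconfigured by the OS.
 */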
/*ARGSUSED*/
int
dr_pre_detach_io(dr_handle_t *hp,
    dr_common_unit_t **devlist, int devnum)
{
	int d;

	ASSERT(devnum > 0);

	for (d = 0; d < devnum; d++) {
		dr_common_unit_t *cp = devlist[d];

		cmn_err(CE_CONT, "OS unconfigure %s", cp->sbdev_path);
	}

	return (0);
}

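/*
 * Check detach results: return -1 if any unit on the list recorded an
 * error during detach, otherwise 0.
 */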
/*ARGSUSED*/
int
dr_post_detach_io(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	register int i;
	int rv = 0;
	static fn_t f = "dr_post_detach_io";

	ASSERT(devnum > 0);
	for (i = 0; i < devnum; i++) {
		dr_common_unit_t *cp = devlist[i];
		if (cp->sbdev_error != NULL) {
			PR_IO("%s: Failed\n", f);
			rv = -1;
			break;
		}
	}
	return (rv);
}

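/*
 * Derive the condition of an I/O unit from its dip: UNKNOWN if there is
 * no dip, FAILED if the device has been retired, and OK if the unit is
 * attached or present.
 */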
static void
dr_get_comp_cond(dr_io_unit_t *ip, dev_info_t *dip)
{
	if (dip == NULL) {
		ip->sbi_cm.sbdev_cond = SBD_COND_UNKNOWN;
		return;
	}

	if (DEVI(dip)->devi_flags & DEVI_RETIRED) {
		ip->sbi_cm.sbdev_cond = SBD_COND_FAILED;
		return;
	}

	if (DR_DEV_IS_ATTACHED(&ip->sbi_cm)) {
		ip->sbi_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&ip->sbi_cm)) {
		ip->sbi_cm.sbdev_cond = SBD_COND_OK;
	}
}

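/*
 * Fill in a status record for each requested I/O unit that is present
 * on the board, including its condition, pathname, reference state and
 * list of unsafe devices.  Returns the number of records filled in, or
 * -1 on error.
 */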
int
dr_io_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int i, ix;
	dr_board_t *bp;
	sbd_io_stat_t *isp;
	dr_io_unit_t *ip;

	bp = hp->h_bd;

	/*
	 * Only look for requested devices that are actually present.
	 */
	devset &= DR_DEVS_PRESENT(bp);

	for (i = ix = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		drmachid_t id;
		dev_info_t *dip;
		sbd_error_t *err;
		drmach_status_t pstat;

		if (DEVSET_IN_SET(devset, SBD_COMP_IO, i) == 0)
			continue;

		ip = dr_get_io_unit(bp, i);

		if (ip->sbi_cm.sbdev_state == DR_STATE_EMPTY) {
			/* present, but not fully initialized */
			continue;
		}

		id = ip->sbi_cm.sbdev_id;
		if (id == (drmachid_t)0)
			continue;

		err = drmach_status(ip->sbi_cm.sbdev_id, &pstat);
		if (err) {
			DRERR_SET_C(&ip->sbi_cm.sbdev_error, &err);
			return (-1);
		}

		dip = NULL;
		err = drmach_get_dip(id, &dip);
		if (err) {
			/* catch this in debug kernels */
			ASSERT(0);

			sbd_err_clear(&err);
			continue;
		}

		isp = &dsp->d_io;
		bzero((caddr_t)isp, sizeof (*isp));

		isp->is_cm.c_id.c_type = ip->sbi_cm.sbdev_type;
		isp->is_cm.c_id.c_unit = ip->sbi_cm.sbdev_unum;
		(void) strlcpy(isp->is_cm.c_id.c_name, pstat.type,
		    sizeof (isp->is_cm.c_id.c_name));

		dr_get_comp_cond(ip, dip);
		isp->is_cm.c_cond = ip->sbi_cm.sbdev_cond;
		isp->is_cm.c_busy = ip->sbi_cm.sbdev_busy | pstat.busy;
		isp->is_cm.c_time = ip->sbi_cm.sbdev_time;
		isp->is_cm.c_ostate = ip->sbi_cm.sbdev_ostate;
		isp->is_cm.c_sflags = 0;

		if (dip == NULL) {
			isp->is_pathname[0] = '\0';
			isp->is_referenced = 0;
			isp->is_unsafe_count = 0;
		} else {
			int refcount = 0, idx = 0;
			uint64_t unsafe_devs[SBD_MAX_UNSAFE];

			ASSERT(e_ddi_branch_held(dip));
			(void) ddi_pathname(dip, isp->is_pathname);

			/* check reference and unsafe counts on devices */
			isp->is_unsafe_count = 0;
			dr_check_devices(dip, &refcount, hp, unsafe_devs,
			    &idx, SBD_MAX_UNSAFE, NULL);
			while (idx > 0) {
				isp->is_unsafe_list[idx-1] =
				    unsafe_devs[idx-1];
				--idx;
			}

			isp->is_referenced = (refcount == 0) ? 0 : 1;

			hp->h_err = NULL;
		}
		ix++;
		dsp++;
	}

	return (ix);
}