1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * This file contains various support routines.
27 */
28
29 #include <sys/scsi/adapters/pmcs/pmcs.h>
30
/*
 * Local static data
 */
/* Quiesce time (usec) before a target map is considered stable. */
static int tgtmap_stable_usec = MICROSEC; /* 1 second */
/* Upper bound (usec) allowed for a full target map sync cycle. */
static int tgtmap_csync_usec = 10 * MICROSEC; /* 10 seconds */

/*
 * SAS Topology Configuration
 *
 * Forward declarations for the discovery/configuration helpers defined
 * later in this file.
 */
static void pmcs_new_tport(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_configure_expander(pmcs_hw_t *, pmcs_phy_t *, pmcs_iport_t *);

static void pmcs_check_expanders(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_check_expander(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_clear_expander(pmcs_hw_t *, pmcs_phy_t *, int);

static int pmcs_expander_get_nphy(pmcs_hw_t *, pmcs_phy_t *);
static int pmcs_expander_content_discover(pmcs_hw_t *, pmcs_phy_t *,
    pmcs_phy_t *);

static int pmcs_smp_function_result(pmcs_hw_t *, smp_response_frame_t *);
static void pmcs_flush_nonio_cmds(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt);
static boolean_t pmcs_validate_devid(pmcs_phy_t *, pmcs_phy_t *, uint32_t);
static void pmcs_clear_phys(pmcs_hw_t *, pmcs_phy_t *);
static int pmcs_configure_new_devices(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_begin_observations(pmcs_hw_t *);
static void pmcs_flush_observations(pmcs_hw_t *);
static boolean_t pmcs_report_observations(pmcs_hw_t *);
static boolean_t pmcs_report_iport_observations(pmcs_hw_t *, pmcs_iport_t *,
    pmcs_phy_t *);
#ifdef DEBUG
static pmcs_phy_t *pmcs_find_phy_needing_work(pmcs_hw_t *, pmcs_phy_t *);
#endif
static int pmcs_kill_devices(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_lock_phy_impl(pmcs_phy_t *, int);
static void pmcs_unlock_phy_impl(pmcs_phy_t *, int);
static pmcs_phy_t *pmcs_clone_phy(pmcs_phy_t *);
static boolean_t pmcs_configure_phy(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_reap_dead_phy(pmcs_phy_t *);
static pmcs_iport_t *pmcs_get_iport_by_ua(pmcs_hw_t *, char *);
static boolean_t pmcs_phy_target_match(pmcs_phy_t *);
static void pmcs_iport_active(pmcs_iport_t *);
static void pmcs_tgtmap_activate_cb(void *, char *, scsi_tgtmap_tgt_type_t,
    void **);
static boolean_t pmcs_tgtmap_deactivate_cb(void *, char *,
    scsi_tgtmap_tgt_type_t, void *, scsi_tgtmap_deact_rsn_t);
static void pmcs_add_dead_phys(pmcs_hw_t *, pmcs_phy_t *);
static void pmcs_get_fw_version(pmcs_hw_t *);
static int pmcs_get_time_stamp(pmcs_hw_t *, uint64_t *, hrtime_t *);

/*
 * Often used strings
 *
 * Shared printf formats for the most common failure messages; each takes
 * __func__ as its only argument.
 */
const char pmcs_nowrk[] = "%s: unable to get work structure";
const char pmcs_nomsg[] = "%s: unable to get Inbound Message entry";
const char pmcs_timeo[] = "%s: command timed out";

extern const ddi_dma_attr_t pmcs_dattr;
extern kmutex_t pmcs_trace_lock;
90
91 /*
92 * Some Initial setup steps.
93 */
94
95 int
pmcs_setup(pmcs_hw_t * pwp)96 pmcs_setup(pmcs_hw_t *pwp)
97 {
98 uint32_t barval = pwp->mpibar;
99 uint32_t i, scratch, regbar, regoff, barbar, baroff;
100 uint32_t new_ioq_depth, ferr = 0;
101
102 /*
103 * Check current state. If we're not at READY state,
104 * we can't go further.
105 */
106 scratch = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
107 if ((scratch & PMCS_MSGU_AAP_STATE_MASK) == PMCS_MSGU_AAP_STATE_ERROR) {
108 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
109 "%s: AAP Error State (0x%x)",
110 __func__, pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
111 PMCS_MSGU_AAP_ERROR_MASK);
112 pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE);
113 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
114 return (-1);
115 }
116 if ((scratch & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) {
117 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
118 "%s: AAP unit not ready (state 0x%x)",
119 __func__, scratch & PMCS_MSGU_AAP_STATE_MASK);
120 pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE);
121 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
122 return (-1);
123 }
124
125 /*
126 * Read the offset from the Message Unit scratchpad 0 register.
127 * This allows us to read the MPI Configuration table.
128 *
129 * Check its signature for validity.
130 */
131 baroff = barval;
132 barbar = barval >> PMCS_MSGU_MPI_BAR_SHIFT;
133 baroff &= PMCS_MSGU_MPI_OFFSET_MASK;
134
135 regoff = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0);
136 regbar = regoff >> PMCS_MSGU_MPI_BAR_SHIFT;
137 regoff &= PMCS_MSGU_MPI_OFFSET_MASK;
138
139 if (regoff > baroff) {
140 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
141 "%s: bad MPI Table Length (register offset=0x%08x, "
142 "passed offset=0x%08x)", __func__, regoff, baroff);
143 return (-1);
144 }
145 if (regbar != barbar) {
146 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
147 "%s: bad MPI BAR (register BAROFF=0x%08x, "
148 "passed BAROFF=0x%08x)", __func__, regbar, barbar);
149 return (-1);
150 }
151 pwp->mpi_offset = regoff;
152 if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS) != PMCS_SIGNATURE) {
153 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
154 "%s: Bad MPI Configuration Table Signature 0x%x", __func__,
155 pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS));
156 return (-1);
157 }
158
159 if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR) != PMCS_MPI_REVISION1) {
160 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
161 "%s: Bad MPI Configuration Revision 0x%x", __func__,
162 pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR));
163 return (-1);
164 }
165
166 /*
167 * Generate offsets for the General System, Inbound Queue Configuration
168 * and Outbound Queue configuration tables. This way the macros to
169 * access those tables will work correctly.
170 */
171 pwp->mpi_gst_offset =
172 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_GSTO);
173 pwp->mpi_iqc_offset =
174 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IQCTO);
175 pwp->mpi_oqc_offset =
176 pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_OQCTO);
177
178 pmcs_get_fw_version(pwp);
179
180 pwp->max_cmd = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_MOIO);
181 pwp->max_dev = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO0) >> 16;
182
183 pwp->max_iq = PMCS_MNIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1));
184 pwp->max_oq = PMCS_MNOQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1));
185 pwp->nphy = PMCS_NPHY(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1));
186 if (pwp->max_iq <= PMCS_NIQ) {
187 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
188 "%s: not enough Inbound Queues supported "
189 "(need %d, max_oq=%d)", __func__, pwp->max_iq, PMCS_NIQ);
190 return (-1);
191 }
192 if (pwp->max_oq <= PMCS_NOQ) {
193 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
194 "%s: not enough Outbound Queues supported "
195 "(need %d, max_oq=%d)", __func__, pwp->max_oq, PMCS_NOQ);
196 return (-1);
197 }
198 if (pwp->nphy == 0) {
199 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
200 "%s: zero phys reported", __func__);
201 return (-1);
202 }
203 if (PMCS_HPIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1))) {
204 pwp->hipri_queue = (1 << PMCS_IQ_OTHER);
205 }
206
207
208 for (i = 0; i < pwp->nphy; i++) {
209 PMCS_MPI_EVQSET(pwp, PMCS_OQ_EVENTS, i);
210 PMCS_MPI_NCQSET(pwp, PMCS_OQ_EVENTS, i);
211 }
212
213 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_INFO2,
214 (PMCS_OQ_EVENTS << GENERAL_EVENT_OQ_SHIFT) |
215 (PMCS_OQ_EVENTS << DEVICE_HANDLE_REMOVED_SHIFT));
216
217 /*
218 * Verify that ioq_depth is valid (> 0 and not so high that it
219 * would cause us to overrun the chip with commands).
220 */
221 if (pwp->ioq_depth == 0) {
222 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
223 "%s: I/O queue depth set to 0. Setting to %d",
224 __func__, PMCS_NQENTRY);
225 pwp->ioq_depth = PMCS_NQENTRY;
226 }
227
228 if (pwp->ioq_depth < PMCS_MIN_NQENTRY) {
229 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
230 "%s: I/O queue depth set too low (%d). Setting to %d",
231 __func__, pwp->ioq_depth, PMCS_MIN_NQENTRY);
232 pwp->ioq_depth = PMCS_MIN_NQENTRY;
233 }
234
235 if (pwp->ioq_depth > (pwp->max_cmd / (PMCS_IO_IQ_MASK + 1))) {
236 new_ioq_depth = pwp->max_cmd / (PMCS_IO_IQ_MASK + 1);
237 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
238 "%s: I/O queue depth set too high (%d). Setting to %d",
239 __func__, pwp->ioq_depth, new_ioq_depth);
240 pwp->ioq_depth = new_ioq_depth;
241 }
242
243 /*
244 * Allocate consistent memory for OQs and IQs.
245 */
246 pwp->iqp_dma_attr = pwp->oqp_dma_attr = pmcs_dattr;
247 pwp->iqp_dma_attr.dma_attr_align =
248 pwp->oqp_dma_attr.dma_attr_align = PMCS_QENTRY_SIZE;
249
250 /*
251 * The Rev C chip has the ability to do PIO to or from consistent
252 * memory anywhere in a 64 bit address space, but the firmware is
253 * not presently set up to do so.
254 */
255 pwp->iqp_dma_attr.dma_attr_addr_hi =
256 pwp->oqp_dma_attr.dma_attr_addr_hi = 0x000000FFFFFFFFFFull;
257
258 for (i = 0; i < PMCS_NIQ; i++) {
259 if (pmcs_dma_setup(pwp, &pwp->iqp_dma_attr,
260 &pwp->iqp_acchdls[i],
261 &pwp->iqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth,
262 (caddr_t *)&pwp->iqp[i], &pwp->iqaddr[i]) == B_FALSE) {
263 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
264 "Failed to setup DMA for iqp[%d]", i);
265 return (-1);
266 }
267 bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
268 }
269
270 for (i = 0; i < PMCS_NOQ; i++) {
271 if (pmcs_dma_setup(pwp, &pwp->oqp_dma_attr,
272 &pwp->oqp_acchdls[i],
273 &pwp->oqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth,
274 (caddr_t *)&pwp->oqp[i], &pwp->oqaddr[i]) == B_FALSE) {
275 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
276 "Failed to setup DMA for oqp[%d]", i);
277 return (-1);
278 }
279 bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
280 }
281
282 /*
283 * Install the IQ and OQ addresses (and null out the rest).
284 */
285 for (i = 0; i < pwp->max_iq; i++) {
286 pwp->iqpi_offset[i] = pmcs_rd_iqc_tbl(pwp, PMCS_IQPIOFFX(i));
287 if (i < PMCS_NIQ) {
288 if (i != PMCS_IQ_OTHER) {
289 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i),
290 pwp->ioq_depth | (PMCS_QENTRY_SIZE << 16));
291 } else {
292 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i),
293 (1 << 30) | pwp->ioq_depth |
294 (PMCS_QENTRY_SIZE << 16));
295 }
296 pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i),
297 DWORD1(pwp->iqaddr[i]));
298 pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i),
299 DWORD0(pwp->iqaddr[i]));
300 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i),
301 DWORD1(pwp->ciaddr+IQ_OFFSET(i)));
302 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i),
303 DWORD0(pwp->ciaddr+IQ_OFFSET(i)));
304 } else {
305 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0);
306 pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0);
307 pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0);
308 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0);
309 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0);
310 }
311 }
312
313 for (i = 0; i < pwp->max_oq; i++) {
314 pwp->oqci_offset[i] = pmcs_rd_oqc_tbl(pwp, PMCS_OQCIOFFX(i));
315 if (i < PMCS_NOQ) {
316 pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), pwp->ioq_depth |
317 (PMCS_QENTRY_SIZE << 16) | OQIEX);
318 pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i),
319 DWORD1(pwp->oqaddr[i]));
320 pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i),
321 DWORD0(pwp->oqaddr[i]));
322 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i),
323 DWORD1(pwp->ciaddr+OQ_OFFSET(i)));
324 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i),
325 DWORD0(pwp->ciaddr+OQ_OFFSET(i)));
326 pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i),
327 pwp->oqvec[i] << 24);
328 pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0);
329 } else {
330 pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0);
331 pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0);
332 pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0);
333 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0);
334 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0);
335 pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0);
336 pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0);
337 }
338 }
339
340 /*
341 * Set up logging, if defined.
342 */
343 if (pwp->fwlog) {
344 uint64_t logdma = pwp->fwaddr;
345 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAH, DWORD1(logdma));
346 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAL, DWORD0(logdma));
347 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBS, PMCS_FWLOG_SIZE >> 1);
348 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELSEV, pwp->fwlog);
349 logdma += (PMCS_FWLOG_SIZE >> 1);
350 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAH, DWORD1(logdma));
351 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAL, DWORD0(logdma));
352 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBS, PMCS_FWLOG_SIZE >> 1);
353 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELSEV, pwp->fwlog);
354 }
355
356 /*
357 * Interrupt vectors, outbound queues, and odb_auto_clear
358 *
359 * MSI/MSI-X:
360 * If we got 4 interrupt vectors, we'll assign one to each outbound
361 * queue as well as the fatal interrupt, and auto clear can be set
362 * for each.
363 *
364 * If we only got 2 vectors, one will be used for I/O completions
365 * and the other for the other two vectors. In this case, auto_
366 * clear can only be set for I/Os, which is fine. The fatal
367 * interrupt will be mapped to the PMCS_FATAL_INTERRUPT bit, which
368 * is not an interrupt vector.
369 *
370 * MSI/MSI-X/INT-X:
371 * If we only got 1 interrupt vector, auto_clear must be set to 0,
372 * and again the fatal interrupt will be mapped to the
373 * PMCS_FATAL_INTERRUPT bit (again, not an interrupt vector).
374 */
375
376 switch (pwp->int_type) {
377 case PMCS_INT_MSIX:
378 case PMCS_INT_MSI:
379 switch (pwp->intr_cnt) {
380 case 1:
381 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE |
382 (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT));
383 pwp->odb_auto_clear = 0;
384 break;
385 case 2:
386 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE |
387 (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT));
388 pwp->odb_auto_clear = (1 << PMCS_FATAL_INTERRUPT) |
389 (1 << PMCS_MSIX_IODONE);
390 break;
391 case 4:
392 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE |
393 (PMCS_MSIX_FATAL << PMCS_FERIV_SHIFT));
394 pwp->odb_auto_clear = (1 << PMCS_MSIX_FATAL) |
395 (1 << PMCS_MSIX_GENERAL) | (1 << PMCS_MSIX_IODONE) |
396 (1 << PMCS_MSIX_EVENTS);
397 break;
398 }
399 break;
400
401 case PMCS_INT_FIXED:
402 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR,
403 PMCS_FERRIE | (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT));
404 pwp->odb_auto_clear = 0;
405 break;
406 }
407
408 /*
409 * If the open retry interval is non-zero, set it.
410 */
411 if (pwp->open_retry_interval != 0) {
412 int phynum;
413
414 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
415 "%s: Setting open retry interval to %d usecs", __func__,
416 pwp->open_retry_interval);
417 for (phynum = 0; phynum < pwp->nphy; phynum ++) {
418 pmcs_wr_gsm_reg(pwp, OPEN_RETRY_INTERVAL(phynum),
419 pwp->open_retry_interval);
420 }
421 }
422
423 /*
424 * Enable Interrupt Reassertion
425 * Default Delay 1000us
426 */
427 ferr = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FERR);
428 if ((ferr & PMCS_MPI_IRAE) == 0) {
429 ferr &= ~(PMCS_MPI_IRAU | PMCS_MPI_IRAD_MASK);
430 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, ferr | PMCS_MPI_IRAE);
431 }
432
433 pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR, pwp->odb_auto_clear);
434 pwp->mpi_table_setup = 1;
435 return (0);
436 }
437
438 /*
439 * Start the Message Passing protocol with the PMC chip.
440 */
441 int
pmcs_start_mpi(pmcs_hw_t * pwp)442 pmcs_start_mpi(pmcs_hw_t *pwp)
443 {
444 int i;
445
446 pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPIINI);
447 for (i = 0; i < 1000; i++) {
448 if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) &
449 PMCS_MSGU_IBDB_MPIINI) == 0) {
450 break;
451 }
452 drv_usecwait(1000);
453 }
454 if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPIINI) {
455 return (-1);
456 }
457 drv_usecwait(500000);
458
459 /*
460 * Check to make sure we got to INIT state.
461 */
462 if (PMCS_MPI_S(pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE)) !=
463 PMCS_MPI_STATE_INIT) {
464 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
465 "%s: MPI launch failed (GST 0x%x DBCLR 0x%x)", __func__,
466 pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE),
467 pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB_CLEAR));
468 return (-1);
469 }
470 return (0);
471 }
472
473 /*
474 * Stop the Message Passing protocol with the PMC chip.
475 */
476 int
pmcs_stop_mpi(pmcs_hw_t * pwp)477 pmcs_stop_mpi(pmcs_hw_t *pwp)
478 {
479 int i;
480
481 for (i = 0; i < pwp->max_iq; i++) {
482 pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0);
483 pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0);
484 pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0);
485 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0);
486 pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0);
487 }
488 for (i = 0; i < pwp->max_oq; i++) {
489 pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0);
490 pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0);
491 pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0);
492 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0);
493 pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0);
494 pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0);
495 pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0);
496 }
497 pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, 0);
498 pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPICTU);
499 for (i = 0; i < 2000; i++) {
500 if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) &
501 PMCS_MSGU_IBDB_MPICTU) == 0) {
502 break;
503 }
504 drv_usecwait(1000);
505 }
506 if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPICTU) {
507 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
508 "%s: MPI stop failed", __func__);
509 return (-1);
510 }
511 return (0);
512 }
513
514 /*
515 * Do a sequence of ECHO messages to test for MPI functionality,
516 * all inbound and outbound queue functionality and interrupts.
517 */
int
pmcs_echo_test(pmcs_hw_t *pwp)
{
	echo_test_t fred;
	struct pmcwork *pwrk;
	uint32_t *msg, count;
	int iqe = 0, iqo = 0, result, rval = 0;
	int iterations;
	hrtime_t echo_start, echo_end, echo_total;

	ASSERT(pwp->max_cmd > 0);

	/*
	 * We want iterations to be max_cmd * 3 to ensure that we run the
	 * echo test enough times to iterate through every inbound queue
	 * at least twice.
	 */
	iterations = pwp->max_cmd * 3;

	echo_total = 0;
	count = 0;

	/*
	 * NOTE(review): count is never incremented in this loop body;
	 * fred.ptr passes its address to the firmware echo payload, so it
	 * appears to be advanced by the echo completion handler — confirm
	 * against the outbound-queue/ECHO completion code.
	 */
	while (count < iterations) {
		/* One blocking work structure per echo round-trip. */
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
		if (pwrk == NULL) {
			pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
			    pmcs_nowrk, __func__);
			rval = -1;
			break;
		}

		mutex_enter(&pwp->iqp_lock[iqe]);
		msg = GET_IQ_ENTRY(pwp, iqe);
		if (msg == NULL) {
			mutex_exit(&pwp->iqp_lock[iqe]);
			pmcs_pwork(pwp, pwrk);
			pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
			    pmcs_nomsg, __func__);
			rval = -1;
			break;
		}

		bzero(msg, PMCS_QENTRY_SIZE);

		if (iqe == PMCS_IQ_OTHER) {
			/* This is on the high priority queue */
			msg[0] = LE_32(PMCS_HIPRI(pwp, iqo, PMCIN_ECHO));
		} else {
			msg[0] = LE_32(PMCS_IOMB_IN_SAS(iqo, PMCIN_ECHO));
		}
		msg[1] = LE_32(pwrk->htag);
		/* Recognizable payload the firmware will echo back. */
		fred.signature = 0xdeadbeef;
		fred.count = count;
		fred.ptr = &count;
		(void) memcpy(&msg[2], &fred, sizeof (fred));
		pwrk->state = PMCS_WORK_STATE_ONCHIP;

		INC_IQ_ENTRY(pwp, iqe);

		echo_start = gethrtime();
		DTRACE_PROBE2(pmcs__echo__test__wait__start,
		    hrtime_t, echo_start, uint32_t, pwrk->htag);

		/* Rotate through every inbound and outbound queue. */
		if (++iqe == PMCS_NIQ) {
			iqe = 0;
		}
		if (++iqo == PMCS_NOQ) {
			iqo = 0;
		}

		WAIT_FOR(pwrk, 250, result);
		pmcs_pwork(pwp, pwrk);

		echo_end = gethrtime();
		DTRACE_PROBE2(pmcs__echo__test__wait__end,
		    hrtime_t, echo_end, int, result);
		/* Accumulate total round-trip time for the coalescing math. */
		echo_total += (echo_end - echo_start);

		if (result) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: command timed out on echo test #%d",
			    __func__, count);
			rval = -1;
			break;
		}
	}

	/*
	 * The intr_threshold is adjusted by PMCS_INTR_THRESHOLD in order to
	 * remove the overhead of things like the delay in getting signaled
	 * for completion.
	 */
	if (echo_total != 0) {
		pwp->io_intr_coal.intr_latency =
		    (echo_total / iterations) / 2;
		pwp->io_intr_coal.intr_threshold =
		    PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 /
		    pwp->io_intr_coal.intr_latency);
	}

	return (rval);
}
620
621 /*
622 * Start the (real) phys
623 */
624 int
pmcs_start_phy(pmcs_hw_t * pwp,int phynum,int linkmode,int speed)625 pmcs_start_phy(pmcs_hw_t *pwp, int phynum, int linkmode, int speed)
626 {
627 int result;
628 uint32_t *msg;
629 struct pmcwork *pwrk;
630 pmcs_phy_t *pptr;
631 sas_identify_af_t sap;
632
633 mutex_enter(&pwp->lock);
634 pptr = pwp->root_phys + phynum;
635 if (pptr == NULL) {
636 mutex_exit(&pwp->lock);
637 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
638 "%s: cannot find port %d", __func__, phynum);
639 return (0);
640 }
641
642 pmcs_lock_phy(pptr);
643 mutex_exit(&pwp->lock);
644
645 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
646 if (pwrk == NULL) {
647 pmcs_unlock_phy(pptr);
648 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
649 return (-1);
650 }
651
652 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
653 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
654
655 if (msg == NULL) {
656 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
657 pmcs_unlock_phy(pptr);
658 pmcs_pwork(pwp, pwrk);
659 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
660 return (-1);
661 }
662 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_START));
663 msg[1] = LE_32(pwrk->htag);
664 msg[2] = LE_32(linkmode | speed | phynum);
665 bzero(&sap, sizeof (sap));
666 sap.device_type = SAS_IF_DTYPE_ENDPOINT;
667 sap.ssp_ini_port = 1;
668
669 if (pwp->separate_ports) {
670 pmcs_wwn2barray(pwp->sas_wwns[phynum], sap.sas_address);
671 } else {
672 pmcs_wwn2barray(pwp->sas_wwns[0], sap.sas_address);
673 }
674
675 ASSERT(phynum < SAS2_PHYNUM_MAX);
676 sap.phy_identifier = phynum & SAS2_PHYNUM_MASK;
677 (void) memcpy(&msg[3], &sap, sizeof (sas_identify_af_t));
678 pwrk->state = PMCS_WORK_STATE_ONCHIP;
679 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
680
681 pptr->state.prog_min_rate = (lowbit((ulong_t)speed) - 1);
682 pptr->state.prog_max_rate = (highbit((ulong_t)speed) - 1);
683 pptr->state.hw_min_rate = PMCS_HW_MIN_LINK_RATE;
684 pptr->state.hw_max_rate = PMCS_HW_MAX_LINK_RATE;
685
686 pmcs_unlock_phy(pptr);
687 WAIT_FOR(pwrk, 1000, result);
688 pmcs_pwork(pwp, pwrk);
689
690 if (result) {
691 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
692 } else {
693 mutex_enter(&pwp->lock);
694 pwp->phys_started |= (1 << phynum);
695 mutex_exit(&pwp->lock);
696 }
697
698 return (0);
699 }
700
701 int
pmcs_start_phys(pmcs_hw_t * pwp)702 pmcs_start_phys(pmcs_hw_t *pwp)
703 {
704 int i, rval;
705
706 for (i = 0; i < pwp->nphy; i++) {
707 if ((pwp->phyid_block_mask & (1 << i)) == 0) {
708 if (pmcs_start_phy(pwp, i,
709 (pwp->phymode << PHY_MODE_SHIFT),
710 pwp->physpeed << PHY_LINK_SHIFT)) {
711 return (-1);
712 }
713 if (pmcs_clear_diag_counters(pwp, i)) {
714 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
715 "%s: failed to reset counters on PHY (%d)",
716 __func__, i);
717 }
718 }
719 }
720
721 rval = pmcs_get_time_stamp(pwp, &pwp->fw_timestamp, &pwp->hrtimestamp);
722 if (rval) {
723 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
724 "%s: Failed to obtain firmware timestamp", __func__);
725 } else {
726 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
727 "Firmware timestamp: 0x%" PRIx64, pwp->fw_timestamp);
728 }
729
730 return (0);
731 }
732
733 /*
734 * Called with PHY locked
735 */
int
pmcs_reset_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint8_t type)
{
	uint32_t *msg;
	/*
	 * NOTE(review): iomb is sized for two queue entries but only the
	 * first PMCS_QENTRY_SIZE bytes are zeroed below — confirm the
	 * completion path never deposits more than one entry here.
	 */
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
	const char *mbar;
	uint32_t amt;
	uint32_t pdevid;
	uint32_t stsoff;
	uint32_t status;
	int result, level, phynum;
	struct pmcwork *pwrk;
	pmcs_iport_t *iport;
	uint32_t htag;

	ASSERT(mutex_owned(&pptr->phy_lock));

	bzero(iomb, PMCS_QENTRY_SIZE);
	phynum = pptr->phynum;
	level = pptr->level;
	if (level > 0) {
		/* Expander-attached PHY: reset goes through the parent. */
		pdevid = pptr->parent->device_id;
	} else if ((level == 0) && (pptr->dtype == EXPANDER)) {
		/* Don't hard-reset the HBA's own PHY to an expander. */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Not resetting HBA PHY @ %s", __func__, pptr->path);
		return (0);
	}

	if (!pptr->iport || !pptr->valid_device_id) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		return (0);
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);

	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}

	/* Completion data is copied back into iomb for status parsing. */
	pwrk->arg = iomb;

	/*
	 * If level > 0, we need to issue an SMP_REQUEST with a PHY_CONTROL
	 * function to do either a link reset or hard reset. If level == 0,
	 * then we do a LOCAL_PHY_CONTROL IOMB to do link/hard reset to the
	 * root (local) PHY
	 */
	if (level) {
		stsoff = 2;
		iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_SMP_REQUEST));
		iomb[1] = LE_32(pwrk->htag);
		iomb[2] = LE_32(pdevid);
		iomb[3] = LE_32(40 << SMP_REQUEST_LENGTH_SHIFT);
		/*
		 * Send SMP PHY CONTROL/HARD or LINK RESET
		 */
		iomb[4] = BE_32(0x40910000);
		iomb[5] = 0;

		if (type == PMCS_PHYOP_HARD_RESET) {
			mbar = "SMP PHY CONTROL/HARD RESET";
			iomb[6] = BE_32((phynum << 16) |
			    (PMCS_PHYOP_HARD_RESET << 8));
		} else {
			mbar = "SMP PHY CONTROL/LINK RESET";
			iomb[6] = BE_32((phynum << 16) |
			    (PMCS_PHYOP_LINK_RESET << 8));
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: sending %s to %s for phy 0x%x",
		    __func__, mbar, pptr->parent->path, pptr->phynum);
		amt = 7;
	} else {
		/*
		 * Unlike most other Outbound messages, status for
		 * a local phy operation is in DWORD 3.
		 */
		stsoff = 3;
		iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
		    PMCIN_LOCAL_PHY_CONTROL));
		iomb[1] = LE_32(pwrk->htag);
		if (type == PMCS_PHYOP_LINK_RESET) {
			mbar = "LOCAL PHY LINK RESET";
			iomb[2] = LE_32((PMCS_PHYOP_LINK_RESET << 8) | phynum);
		} else {
			mbar = "LOCAL PHY HARD RESET";
			iomb[2] = LE_32((PMCS_PHYOP_HARD_RESET << 8) | phynum);
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: sending %s to %s", __func__, mbar, pptr->path);
		amt = 3;
	}

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (msg == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}
	COPY_MESSAGE(msg, iomb, amt);
	htag = pwrk->htag;

	/*
	 * Hold the iport and serialize SMP activity on it while we drop
	 * the PHY lock for the duration of the wait.
	 */
	pmcs_hold_iport(pptr->iport);
	iport = pptr->iport;
	pmcs_smp_acquire(iport);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_smp_release(iport);
	pmcs_rele_iport(iport);
	pmcs_lock_phy(pptr);
	if (result) {
		/* Timed out: try to abort the outstanding SMP request. */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);

		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Unable to issue SMP abort for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		return (EIO);
	}
	status = LE_32(iomb[stsoff]);

	if (status != PMCOUT_STATUS_OK) {
		char buf[32];
		const char *es = pmcs_status_str(status);
		if (es == NULL) {
			(void) snprintf(buf, sizeof (buf), "Status 0x%x",
			    status);
			es = buf;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: %s action returned %s for %s", __func__, mbar, es,
		    pptr->path);
		return (status);
	}

	return (0);
}
886
887 /*
888 * Stop the (real) phys. No PHY or softstate locks are required as this only
889 * happens during detach.
890 */
891 void
pmcs_stop_phy(pmcs_hw_t * pwp,int phynum)892 pmcs_stop_phy(pmcs_hw_t *pwp, int phynum)
893 {
894 int result;
895 pmcs_phy_t *pptr;
896 uint32_t *msg;
897 struct pmcwork *pwrk;
898
899 pptr = pwp->root_phys + phynum;
900 if (pptr == NULL) {
901 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
902 "%s: unable to find port %d", __func__, phynum);
903 return;
904 }
905
906 if (pwp->phys_started & (1 << phynum)) {
907 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
908
909 if (pwrk == NULL) {
910 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL,
911 pmcs_nowrk, __func__);
912 return;
913 }
914
915 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
916 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
917
918 if (msg == NULL) {
919 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
920 pmcs_pwork(pwp, pwrk);
921 pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL,
922 pmcs_nomsg, __func__);
923 return;
924 }
925
926 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_STOP));
927 msg[1] = LE_32(pwrk->htag);
928 msg[2] = LE_32(phynum);
929 pwrk->state = PMCS_WORK_STATE_ONCHIP;
930 /*
931 * Make this unconfigured now.
932 */
933 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
934 WAIT_FOR(pwrk, 1000, result);
935 pmcs_pwork(pwp, pwrk);
936 if (result) {
937 pmcs_prt(pwp, PMCS_PRT_DEBUG,
938 pptr, NULL, pmcs_timeo, __func__);
939 }
940
941 pwp->phys_started &= ~(1 << phynum);
942 }
943
944 pptr->configured = 0;
945 }
946
947 /*
948 * No locks should be required as this is only called during detach
949 */
950 void
pmcs_stop_phys(pmcs_hw_t * pwp)951 pmcs_stop_phys(pmcs_hw_t *pwp)
952 {
953 int i;
954 for (i = 0; i < pwp->nphy; i++) {
955 if ((pwp->phyid_block_mask & (1 << i)) == 0) {
956 pmcs_stop_phy(pwp, i);
957 }
958 }
959 }
960
961 /*
962 * Run SAS_DIAG_EXECUTE with cmd and cmd_desc passed.
963 * ERR_CNT_RESET: return status of cmd
964 * DIAG_REPORT_GET: return value of the counter
965 */
int
pmcs_sas_diag_execute(pmcs_hw_t *pwp, uint32_t cmd, uint32_t cmd_desc,
    uint8_t phynum)
{
	uint32_t htag, *ptr, status, msg[PMCS_MSG_SIZE << 1];
	int result;
	struct pmcwork *pwrk;

	/* Acquire a work structure we can block on for completion. */
	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__);
		return (DDI_FAILURE);
	}
	/* The completion copies the response IOMB back into msg. */
	pwrk->arg = msg;
	htag = pwrk->htag;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_SAS_DIAG_EXECUTE));
	msg[1] = LE_32(htag);
	msg[2] = LE_32((cmd << PMCS_DIAG_CMD_SHIFT) |
	    (cmd_desc << PMCS_DIAG_CMD_DESC_SHIFT) | phynum);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__);
		return (DDI_FAILURE);
	}
	COPY_MESSAGE(ptr, msg, 3);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);
	if (result) {
		pmcs_timed_out(pwp, htag, __func__);
		return (DDI_FAILURE);
	}

	status = LE_32(msg[3]);

	/* Return for counter reset */
	if (cmd == PMCS_ERR_CNT_RESET)
		return (status);

	/* Return for counter value */
	if (status) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed, status (0x%x)", __func__, status);
		return (DDI_FAILURE);
	}
	/*
	 * NOTE(review): the counter value shares the return space with
	 * DDI_FAILURE; a counter that happens to equal DDI_FAILURE is
	 * indistinguishable from an error — confirm callers tolerate this.
	 */
	return (LE_32(msg[4]));
}
1019
1020 /* Get the current value of the counter for desc on phynum and return it. */
1021 int
pmcs_get_diag_report(pmcs_hw_t * pwp,uint32_t desc,uint8_t phynum)1022 pmcs_get_diag_report(pmcs_hw_t *pwp, uint32_t desc, uint8_t phynum)
1023 {
1024 return (pmcs_sas_diag_execute(pwp, PMCS_DIAG_REPORT_GET, desc, phynum));
1025 }
1026
1027 /* Clear all of the counters for phynum. Returns the status of the command. */
1028 int
pmcs_clear_diag_counters(pmcs_hw_t * pwp,uint8_t phynum)1029 pmcs_clear_diag_counters(pmcs_hw_t *pwp, uint8_t phynum)
1030 {
1031 uint32_t cmd = PMCS_ERR_CNT_RESET;
1032 uint32_t cmd_desc;
1033
1034 cmd_desc = PMCS_INVALID_DWORD_CNT;
1035 if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
1036 return (DDI_FAILURE);
1037
1038 cmd_desc = PMCS_DISPARITY_ERR_CNT;
1039 if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
1040 return (DDI_FAILURE);
1041
1042 cmd_desc = PMCS_LOST_DWORD_SYNC_CNT;
1043 if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
1044 return (DDI_FAILURE);
1045
1046 cmd_desc = PMCS_RESET_FAILED_CNT;
1047 if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
1048 return (DDI_FAILURE);
1049
1050 return (DDI_SUCCESS);
1051 }
1052
1053 /*
1054 * Get firmware timestamp
1055 */
static int
pmcs_get_time_stamp(pmcs_hw_t *pwp, uint64_t *fw_ts, hrtime_t *sys_hr_ts)
{
	uint32_t htag, *ptr, msg[PMCS_MSG_SIZE << 1];
	int result;
	struct pmcwork *pwrk;

	/* Get a work structure we can block on until the IOMB completes. */
	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nowrk, __func__);
		return (-1);
	}
	/* The completion path copies the response IOMB into 'msg'. */
	pwrk->arg = msg;
	htag = pwrk->htag;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_GET_TIME_STAMP));
	msg[1] = LE_32(pwrk->htag);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, pmcs_nomsg, __func__);
		return (-1);
	}
	COPY_MESSAGE(ptr, msg, 2);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Wait up to a second for the firmware to respond. */
	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);
	if (result) {
		pmcs_timed_out(pwp, htag, __func__);
		return (-1);
	}

	/*
	 * Snapshot the system hrtime/timestamp atomically (relative to
	 * other trace users) with the firmware timestamp so the two
	 * clocks can be correlated in trace output.
	 */
	mutex_enter(&pmcs_trace_lock);
	*sys_hr_ts = gethrtime();
	gethrestime(&pwp->sys_timestamp);
	/* Firmware timestamp is returned as two little-endian dwords. */
	*fw_ts = LE_32(msg[2]) | (((uint64_t)LE_32(msg[3])) << 32);
	mutex_exit(&pmcs_trace_lock);
	return (0);
}
1099
1100 /*
1101 * Dump all pertinent registers
1102 */
1103
1104 void
pmcs_register_dump(pmcs_hw_t * pwp)1105 pmcs_register_dump(pmcs_hw_t *pwp)
1106 {
1107 int i;
1108 uint32_t val;
1109
1110 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump start",
1111 ddi_get_instance(pwp->dip));
1112 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
1113 "OBDB (intr): 0x%08x (mask): 0x%08x (clear): 0x%08x",
1114 pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB),
1115 pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_MASK),
1116 pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR));
1117 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH0: 0x%08x",
1118 pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0));
1119 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH1: 0x%08x",
1120 pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1));
1121 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH2: 0x%08x",
1122 pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2));
1123 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "SCRATCH3: 0x%08x",
1124 pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH3));
1125 for (i = 0; i < PMCS_NIQ; i++) {
1126 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "IQ %d: CI %u PI %u",
1127 i, pmcs_rd_iqci(pwp, i), pmcs_rd_iqpi(pwp, i));
1128 }
1129 for (i = 0; i < PMCS_NOQ; i++) {
1130 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "OQ %d: CI %u PI %u",
1131 i, pmcs_rd_oqci(pwp, i), pmcs_rd_oqpi(pwp, i));
1132 }
1133 val = pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE);
1134 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
1135 "GST TABLE BASE: 0x%08x (STATE=0x%x QF=%d GSTLEN=%d HMI_ERR=0x%x)",
1136 val, PMCS_MPI_S(val), PMCS_QF(val), PMCS_GSTLEN(val) * 4,
1137 PMCS_HMI_ERR(val));
1138 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ0: 0x%08x",
1139 pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ0));
1140 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IQFRZ1: 0x%08x",
1141 pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ1));
1142 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE MSGU TICK: 0x%08x",
1143 pmcs_rd_gst_tbl(pwp, PMCS_GST_MSGU_TICK));
1144 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "GST TABLE IOP TICK: 0x%08x",
1145 pmcs_rd_gst_tbl(pwp, PMCS_GST_IOP_TICK));
1146 for (i = 0; i < pwp->nphy; i++) {
1147 uint32_t rerrf, pinfo, started = 0, link = 0;
1148 pinfo = pmcs_rd_gst_tbl(pwp, PMCS_GST_PHY_INFO(i));
1149 if (pinfo & 1) {
1150 started = 1;
1151 link = pinfo & 2;
1152 }
1153 rerrf = pmcs_rd_gst_tbl(pwp, PMCS_GST_RERR_INFO(i));
1154 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
1155 "GST TABLE PHY%d STARTED=%d LINK=%d RERR=0x%08x",
1156 i, started, link, rerrf);
1157 }
1158 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "pmcs%d: Register dump end",
1159 ddi_get_instance(pwp->dip));
1160 }
1161
1162 /*
1163 * Handle SATA Abort and other error processing
1164 */
int
pmcs_abort_handler(pmcs_hw_t *pwp)
{
	pmcs_phy_t *pptr, *pnext, *pnext_uplevel[PMCS_MAX_XPND];
	pmcs_xscsi_t *tgt;
	int r, level = 0;

	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s", __func__);

	mutex_enter(&pwp->lock);
	pptr = pwp->root_phys;
	mutex_exit(&pwp->lock);

	/*
	 * Iterative pre-order walk of the PHY tree.  pnext_uplevel[]
	 * remembers the pending sibling at each expander level so the
	 * walk can resume there after descending into a child list.
	 */
	while (pptr) {
		/*
		 * XXX: Need to make sure this doesn't happen
		 * XXX: when non-NCQ commands are running.
		 */
		pmcs_lock_phy(pptr);
		if (pptr->need_rl_ext) {
			/* SATA NCQ error recovery on this phy. */
			ASSERT(pptr->dtype == SATA);
			if (pmcs_acquire_scratch(pwp, B_FALSE)) {
				/* Scratch busy; leave phy for a later pass. */
				goto next_phy;
			}
			r = pmcs_sata_abort_ncq(pwp, pptr);
			pmcs_release_scratch(pwp);
			if (r == ENOMEM) {
				goto next_phy;
			}
			if (r) {
				/* NCQ abort failed; fall back to link reset. */
				r = pmcs_reset_phy(pwp, pptr,
				    PMCS_PHYOP_LINK_RESET);
				if (r == ENOMEM) {
					goto next_phy;
				}
				/* what if other failures happened? */
				pptr->abort_pending = 1;
				pptr->abort_sent = 0;
			}
		}
		/* Nothing to abort, or an abort is already in flight. */
		if (pptr->abort_pending == 0 || pptr->abort_sent) {
			goto next_phy;
		}
		pptr->abort_pending = 0;
		/* ENOMEM: re-arm abort_pending so a later pass retries. */
		if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) == ENOMEM) {
			pptr->abort_pending = 1;
			goto next_phy;
		}
		pptr->abort_sent = 1;

		/*
		 * If the iport is no longer active, flush the queues
		 */
		if ((pptr->iport == NULL) ||
		    (pptr->iport->ua_state != UA_ACTIVE)) {
			tgt = pptr->target;
			if (tgt != NULL) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt,
				    "%s: Clearing target 0x%p, inactive iport",
				    __func__, (void *) tgt);
				mutex_enter(&tgt->statlock);
				pmcs_clear_xp(pwp, tgt);
				mutex_exit(&tgt->statlock);
			}
		}

next_phy:
		/* Advance: children first, then sibling, then pop a level. */
		if (pptr->children) {
			pnext = pptr->children;
			pnext_uplevel[level++] = pptr->sibling;
		} else {
			pnext = pptr->sibling;
			while ((pnext == NULL) && (level > 0)) {
				pnext = pnext_uplevel[--level];
			}
		}

		pmcs_unlock_phy(pptr);
		pptr = pnext;
	}

	return (0);
}
1248
1249 /*
1250 * Register a device (get a device handle for it).
1251 * Called with PHY lock held.
1252 */
int
pmcs_register_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	int result = 0;
	uint32_t *msg;
	uint32_t tmp, status;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Need both a queue entry and a work structure to proceed. */
	if (msg == NULL ||
	    (pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr)) == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		result = ENOMEM;
		goto out;
	}

	/* The completion path copies the response IOMB into 'iomb'. */
	pwrk->arg = iomb;
	pwrk->dtype = pptr->dtype;

	msg[1] = LE_32(pwrk->htag);
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_REGISTER_DEVICE));
	tmp = PMCS_DEVREG_TLR |
	    (pptr->link_rate << PMCS_DEVREG_LINK_RATE_SHIFT);
	/* Root (direct-attached) phys also encode the phy number. */
	if (IS_ROOT_PHY(pptr)) {
		msg[2] = LE_32(pptr->portid |
		    (pptr->phynum << PMCS_PHYID_SHIFT));
	} else {
		msg[2] = LE_32(pptr->portid);
	}
	if (pptr->dtype == SATA) {
		if (IS_ROOT_PHY(pptr)) {
			tmp |= PMCS_DEVREG_TYPE_SATA_DIRECT;
		} else {
			tmp |= PMCS_DEVREG_TYPE_SATA;
		}
	} else {
		tmp |= PMCS_DEVREG_TYPE_SAS;
	}
	msg[3] = LE_32(tmp);
	msg[4] = LE_32(PMCS_DEVREG_IT_NEXUS_TIMEOUT);
	(void) memcpy(&msg[5], pptr->sas_address, 8);

	CLEAN_MESSAGE(msg, 7);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/*
	 * Drop the PHY lock while blocked on the completion, then
	 * reacquire it before touching the phy again.
	 */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
		result = ETIMEDOUT;
		goto out;
	}
	status = LE_32(iomb[2]);
	tmp = LE_32(iomb[3]);	/* device id assigned by firmware */
	switch (status) {
	case PMCS_DEVREG_OK:
	case PMCS_DEVREG_DEVICE_ALREADY_REGISTERED:
	case PMCS_DEVREG_PHY_ALREADY_REGISTERED:
		/* Reject a device id that is already in use elsewhere. */
		if (pmcs_validate_devid(pwp->root_phys, pptr, tmp) == B_FALSE) {
			result = EEXIST;
			goto out;
		} else if (status != PMCS_DEVREG_OK) {
			if (tmp == 0xffffffff) {	/* F/W bug */
				pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL,
				    "%s: phy %s already has bogus devid 0x%x",
				    __func__, pptr->path, tmp);
				result = EIO;
				goto out;
			} else {
				pmcs_prt(pwp, PMCS_PRT_INFO, pptr, NULL,
				    "%s: phy %s already has a device id 0x%x",
				    __func__, pptr->path, tmp);
			}
		}
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: status 0x%x when trying to register device %s",
		    __func__, status, pptr->path);
		result = EIO;
		goto out;
	}
	pptr->device_id = tmp;
	pptr->valid_device_id = 1;
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "Phy %s/" SAS_ADDR_FMT
	    " registered with device_id 0x%x (portid %d)", pptr->path,
	    SAS_ADDR_PRT(pptr->sas_address), tmp, pptr->portid);
out:
	return (result);
}
1350
1351 /*
1352 * Deregister a device (remove a device handle).
1353 * Called with PHY locked.
1354 */
void
pmcs_deregister_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	struct pmcwork *pwrk;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
	uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
	int result;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		/* No work structure available; silently give up. */
		return;
	}

	/* The completion path copies the response IOMB into 'iomb'. */
	pwrk->arg = iomb;
	pwrk->dtype = pptr->dtype;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		return;
	}
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
	    PMCIN_DEREGISTER_DEVICE_HANDLE));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	COPY_MESSAGE(ptr, msg, 3);
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/*
	 * Drop the PHY lock while blocked on the completion, then
	 * reacquire it before touching the phy again.
	 */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 250, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);

	if (result) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, pmcs_timeo, __func__);
		return;
	}
	status = LE_32(iomb[2]);
	if (status != PMCOUT_STATUS_OK) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: status 0x%x when trying to deregister device %s",
		    __func__, status, pptr->path);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: device %s deregistered", __func__, pptr->path);
	}

	/*
	 * Invalidate the softstate handle regardless of the firmware
	 * status above; timeouts returned earlier, so the device is
	 * either deregistered or in an unknown state.
	 */
	pptr->device_id = PMCS_INVALID_DEVICE_ID;
	pptr->configured = 0;
	pptr->deregister_wait = 0;
	pptr->valid_device_id = 0;
}
1409
1410 /*
1411 * Deregister all registered devices.
1412 */
1413 void
pmcs_deregister_devices(pmcs_hw_t * pwp,pmcs_phy_t * phyp)1414 pmcs_deregister_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
1415 {
1416 /*
1417 * Start at the maximum level and walk back to level 0. This only
1418 * gets done during detach after all threads and timers have been
1419 * destroyed.
1420 */
1421 while (phyp) {
1422 if (phyp->children) {
1423 pmcs_deregister_devices(pwp, phyp->children);
1424 }
1425 pmcs_lock_phy(phyp);
1426 if (phyp->valid_device_id) {
1427 pmcs_deregister_device(pwp, phyp);
1428 }
1429 pmcs_unlock_phy(phyp);
1430 phyp = phyp->sibling;
1431 }
1432 }
1433
1434 /*
1435 * Perform a 'soft' reset on the PMC chip
1436 */
1437 int
pmcs_soft_reset(pmcs_hw_t * pwp,boolean_t no_restart)1438 pmcs_soft_reset(pmcs_hw_t *pwp, boolean_t no_restart)
1439 {
1440 uint32_t s2, sfrbits, gsm, rapchk, wapchk, wdpchk, spc, tsmode;
1441 pmcs_phy_t *pptr;
1442 char *msg = NULL;
1443 int i;
1444
1445 /*
1446 * Disable interrupts
1447 */
1448 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
1449 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
1450
1451 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%s", __func__);
1452
1453 if (pwp->locks_initted) {
1454 mutex_enter(&pwp->lock);
1455 }
1456 pwp->blocked = 1;
1457
1458 /*
1459 * Clear our softstate copies of the MSGU and IOP heartbeats.
1460 */
1461 pwp->last_msgu_tick = pwp->last_iop_tick = 0;
1462
1463 /*
1464 * Step 1
1465 */
1466 s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2);
1467 if ((s2 & PMCS_MSGU_HOST_SOFT_RESET_READY) == 0) {
1468 pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE);
1469 pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE);
1470 for (i = 0; i < 100; i++) {
1471 s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
1472 PMCS_MSGU_HOST_SOFT_RESET_READY;
1473 if (s2) {
1474 break;
1475 }
1476 drv_usecwait(10000);
1477 }
1478 s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
1479 PMCS_MSGU_HOST_SOFT_RESET_READY;
1480 if (s2 == 0) {
1481 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1482 "%s: PMCS_MSGU_HOST_SOFT_RESET_READY never came "
1483 "ready", __func__);
1484 pmcs_register_dump(pwp);
1485 if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
1486 PMCS_MSGU_CPU_SOFT_RESET_READY) == 0 ||
1487 (pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
1488 PMCS_MSGU_CPU_SOFT_RESET_READY) == 0) {
1489 pwp->state = STATE_DEAD;
1490 pwp->blocked = 0;
1491 if (pwp->locks_initted) {
1492 mutex_exit(&pwp->lock);
1493 }
1494 return (-1);
1495 }
1496 }
1497 }
1498
1499 /*
1500 * Step 2
1501 */
1502 pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_IOP, 0);
1503 drv_usecwait(10);
1504 pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_AAP1, 0);
1505 drv_usecwait(10);
1506 pmcs_wr_topunit(pwp, PMCS_EVENT_INT_ENABLE, 0);
1507 drv_usecwait(10);
1508 pmcs_wr_topunit(pwp, PMCS_EVENT_INT_STAT,
1509 pmcs_rd_topunit(pwp, PMCS_EVENT_INT_STAT));
1510 drv_usecwait(10);
1511 pmcs_wr_topunit(pwp, PMCS_ERROR_INT_ENABLE, 0);
1512 drv_usecwait(10);
1513 pmcs_wr_topunit(pwp, PMCS_ERROR_INT_STAT,
1514 pmcs_rd_topunit(pwp, PMCS_ERROR_INT_STAT));
1515 drv_usecwait(10);
1516
1517 sfrbits = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
1518 PMCS_MSGU_AAP_SFR_PROGRESS;
1519 sfrbits ^= PMCS_MSGU_AAP_SFR_PROGRESS;
1520 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "PMCS_MSGU_HOST_SCRATCH0 "
1521 "%08x -> %08x", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0),
1522 HST_SFT_RESET_SIG);
1523 pmcs_wr_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0, HST_SFT_RESET_SIG);
1524
1525 /*
1526 * Step 3
1527 */
1528 gsm = pmcs_rd_gsm_reg(pwp, 0, GSM_CFG_AND_RESET);
1529 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm,
1530 gsm & ~PMCS_SOFT_RESET_BITS);
1531 pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm & ~PMCS_SOFT_RESET_BITS);
1532
1533 /*
1534 * Step 4
1535 */
1536 rapchk = pmcs_rd_gsm_reg(pwp, 0, READ_ADR_PARITY_CHK_EN);
1537 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN "
1538 "%08x -> %08x", rapchk, 0);
1539 pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, 0);
1540 wapchk = pmcs_rd_gsm_reg(pwp, 0, WRITE_ADR_PARITY_CHK_EN);
1541 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN "
1542 "%08x -> %08x", wapchk, 0);
1543 pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, 0);
1544 wdpchk = pmcs_rd_gsm_reg(pwp, 0, WRITE_DATA_PARITY_CHK_EN);
1545 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN "
1546 "%08x -> %08x", wdpchk, 0);
1547 pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, 0);
1548
1549 /*
1550 * Step 5
1551 */
1552 drv_usecwait(100);
1553
1554 /*
1555 * Step 5.5 (Temporary workaround for 1.07.xx Beta)
1556 */
1557 tsmode = pmcs_rd_gsm_reg(pwp, 0, PMCS_GPIO_TRISTATE_MODE_ADDR);
1558 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GPIO TSMODE %08x -> %08x",
1559 tsmode, tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1));
1560 pmcs_wr_gsm_reg(pwp, PMCS_GPIO_TRISTATE_MODE_ADDR,
1561 tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1));
1562 drv_usecwait(10);
1563
1564 /*
1565 * Step 6
1566 */
1567 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
1568 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x",
1569 spc, spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
1570 pmcs_wr_topunit(pwp, PMCS_SPC_RESET,
1571 spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
1572 drv_usecwait(10);
1573
1574 /*
1575 * Step 7
1576 */
1577 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
1578 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x",
1579 spc, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB));
1580 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB));
1581
1582 /*
1583 * Step 8
1584 */
1585 drv_usecwait(100);
1586
1587 /*
1588 * Step 9
1589 */
1590 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
1591 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x",
1592 spc, spc | (BDMA_CORE_RSTB|OSSP_RSTB));
1593 pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc | (BDMA_CORE_RSTB|OSSP_RSTB));
1594
1595 /*
1596 * Step 10
1597 */
1598 drv_usecwait(100);
1599
1600 /*
1601 * Step 11
1602 */
1603 gsm = pmcs_rd_gsm_reg(pwp, 0, GSM_CFG_AND_RESET);
1604 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "GSM %08x -> %08x", gsm,
1605 gsm | PMCS_SOFT_RESET_BITS);
1606 pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm | PMCS_SOFT_RESET_BITS);
1607 drv_usecwait(10);
1608
1609 /*
1610 * Step 12
1611 */
1612 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "READ_ADR_PARITY_CHK_EN "
1613 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, READ_ADR_PARITY_CHK_EN),
1614 rapchk);
1615 pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, rapchk);
1616 drv_usecwait(10);
1617 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_ADR_PARITY_CHK_EN "
1618 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, WRITE_ADR_PARITY_CHK_EN),
1619 wapchk);
1620 pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, wapchk);
1621 drv_usecwait(10);
1622 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "WRITE_DATA_PARITY_CHK_EN "
1623 "%08x -> %08x", pmcs_rd_gsm_reg(pwp, 0, WRITE_DATA_PARITY_CHK_EN),
1624 wapchk);
1625 pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, wdpchk);
1626 drv_usecwait(10);
1627
1628 /*
1629 * Step 13
1630 */
1631 spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
1632 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "SPC_RESET %08x -> %08x",
1633 spc, spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
1634 pmcs_wr_topunit(pwp, PMCS_SPC_RESET,
1635 spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
1636
1637 /*
1638 * Step 14
1639 */
1640 drv_usecwait(100);
1641
1642 /*
1643 * Step 15
1644 */
1645 for (spc = 0, i = 0; i < 1000; i++) {
1646 drv_usecwait(1000);
1647 spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
1648 if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) == sfrbits) {
1649 break;
1650 }
1651 }
1652
1653 if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) != sfrbits) {
1654 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1655 "SFR didn't toggle (sfr 0x%x)", spc);
1656 pwp->state = STATE_DEAD;
1657 pwp->blocked = 0;
1658 if (pwp->locks_initted) {
1659 mutex_exit(&pwp->lock);
1660 }
1661 return (-1);
1662 }
1663
1664 /*
1665 * Step 16
1666 */
1667 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
1668 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
1669
1670 /*
1671 * Wait for up to 5 seconds for AAP state to come either ready or error.
1672 */
1673 for (i = 0; i < 50; i++) {
1674 spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
1675 PMCS_MSGU_AAP_STATE_MASK;
1676 if (spc == PMCS_MSGU_AAP_STATE_ERROR ||
1677 spc == PMCS_MSGU_AAP_STATE_READY) {
1678 break;
1679 }
1680 drv_usecwait(100000);
1681 }
1682 spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
1683 if ((spc & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) {
1684 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1685 "soft reset failed (state 0x%x)", spc);
1686 pwp->state = STATE_DEAD;
1687 pwp->blocked = 0;
1688 if (pwp->locks_initted) {
1689 mutex_exit(&pwp->lock);
1690 }
1691 return (-1);
1692 }
1693
1694 /* Clear the firmware log */
1695 if (pwp->fwlogp) {
1696 bzero(pwp->fwlogp, PMCS_FWLOG_SIZE);
1697 }
1698
1699 /* Reset our queue indices and entries */
1700 bzero(pwp->shadow_iqpi, sizeof (pwp->shadow_iqpi));
1701 bzero(pwp->last_iqci, sizeof (pwp->last_iqci));
1702 bzero(pwp->last_htag, sizeof (pwp->last_htag));
1703 for (i = 0; i < PMCS_NIQ; i++) {
1704 if (pwp->iqp[i]) {
1705 bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
1706 pmcs_wr_iqpi(pwp, i, 0);
1707 pmcs_wr_iqci(pwp, i, 0);
1708 }
1709 }
1710 for (i = 0; i < PMCS_NOQ; i++) {
1711 if (pwp->oqp[i]) {
1712 bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
1713 pmcs_wr_oqpi(pwp, i, 0);
1714 pmcs_wr_oqci(pwp, i, 0);
1715 }
1716
1717 }
1718
1719 if (pwp->state == STATE_DEAD || pwp->state == STATE_UNPROBING ||
1720 pwp->state == STATE_PROBING || pwp->locks_initted == 0) {
1721 pwp->blocked = 0;
1722 if (pwp->locks_initted) {
1723 mutex_exit(&pwp->lock);
1724 }
1725 return (0);
1726 }
1727
1728 /*
1729 * Return at this point if we dont need to startup.
1730 */
1731 if (no_restart) {
1732 return (0);
1733 }
1734
1735 ASSERT(pwp->locks_initted != 0);
1736
1737 /*
1738 * Flush the target queues and clear each target's PHY
1739 */
1740 if (pwp->targets) {
1741 for (i = 0; i < pwp->max_dev; i++) {
1742 pmcs_xscsi_t *xp = pwp->targets[i];
1743
1744 if (xp == NULL) {
1745 continue;
1746 }
1747
1748 mutex_enter(&xp->statlock);
1749 pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES);
1750 xp->phy = NULL;
1751 mutex_exit(&xp->statlock);
1752 }
1753 }
1754
1755 /*
1756 * Zero out the ports list, free non root phys, clear root phys
1757 */
1758 bzero(pwp->ports, sizeof (pwp->ports));
1759 pmcs_free_all_phys(pwp, pwp->root_phys);
1760 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
1761 pmcs_lock_phy(pptr);
1762 pmcs_clear_phy(pwp, pptr);
1763 pptr->target = NULL;
1764 pmcs_unlock_phy(pptr);
1765 }
1766
1767 /*
1768 * Restore Interrupt Mask
1769 */
1770 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
1771 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
1772
1773 pwp->mpi_table_setup = 0;
1774 mutex_exit(&pwp->lock);
1775
1776 /*
1777 * Set up MPI again.
1778 */
1779 if (pmcs_setup(pwp)) {
1780 msg = "unable to setup MPI tables again";
1781 goto fail_restart;
1782 }
1783 pmcs_report_fwversion(pwp);
1784
1785 /*
1786 * Restart MPI
1787 */
1788 if (pmcs_start_mpi(pwp)) {
1789 msg = "unable to restart MPI again";
1790 goto fail_restart;
1791 }
1792
1793 mutex_enter(&pwp->lock);
1794 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
1795 mutex_exit(&pwp->lock);
1796
1797 /*
1798 * Run any completions
1799 */
1800 PMCS_CQ_RUN(pwp);
1801
1802 /*
1803 * Delay
1804 */
1805 drv_usecwait(1000000);
1806 return (0);
1807
1808 fail_restart:
1809 mutex_enter(&pwp->lock);
1810 pwp->state = STATE_DEAD;
1811 mutex_exit(&pwp->lock);
1812 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
1813 "%s: Failed: %s", __func__, msg);
1814 return (-1);
1815 }
1816
1817
1818 /*
1819 * Perform a 'hot' reset, which will soft reset the chip and
1820 * restore the state back to pre-reset context. Called with pwp
1821 * lock held.
1822 */
int
pmcs_hot_reset(pmcs_hw_t *pwp)
{
	pmcs_iport_t *iport;

	/* Entered and exited with pwp->lock held by the caller. */
	ASSERT(mutex_owned(&pwp->lock));
	pwp->state = STATE_IN_RESET;

	/*
	 * For any iports on this HBA, report empty target sets and
	 * then tear them down.
	 */
	rw_enter(&pwp->iports_lock, RW_READER);
	for (iport = list_head(&pwp->iports); iport != NULL;
	    iport = list_next(&pwp->iports, iport)) {
		mutex_enter(&iport->lock);
		/* begin/end with no observations reports an empty set */
		(void) scsi_hba_tgtmap_set_begin(iport->iss_tgtmap);
		(void) scsi_hba_tgtmap_set_end(iport->iss_tgtmap, 0);
		pmcs_iport_teardown_phys(iport);
		mutex_exit(&iport->lock);
	}
	rw_exit(&pwp->iports_lock);

	/* Grab a register dump, in the event that reset fails */
	pmcs_register_dump_int(pwp);
	mutex_exit(&pwp->lock);

	/* Ensure discovery is not running before we proceed */
	mutex_enter(&pwp->config_lock);
	while (pwp->configuring) {
		cv_wait(&pwp->config_cv, &pwp->config_lock);
	}
	mutex_exit(&pwp->config_lock);

	/* Issue soft reset and clean up related softstate */
	if (pmcs_soft_reset(pwp, B_FALSE)) {
		/*
		 * Disable interrupts, in case we got far enough along to
		 * enable them, then fire off ereport and service impact.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed soft reset", __func__);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
		pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
		/* Re-enter pwp->lock so our caller's unlock balances. */
		mutex_enter(&pwp->lock);
		pwp->state = STATE_DEAD;
		return (DDI_FAILURE);
	}

	mutex_enter(&pwp->lock);
	pwp->state = STATE_RUNNING;
	mutex_exit(&pwp->lock);

	/*
	 * Finally, restart the phys, which will bring the iports back
	 * up and eventually result in discovery running.
	 */
	if (pmcs_start_phys(pwp)) {
		/* We should be up and running now, so retry */
		if (pmcs_start_phys(pwp)) {
			/* Apparently unable to restart PHYs, fail */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: failed to restart PHYs after soft reset",
			    __func__);
			mutex_enter(&pwp->lock);
			return (DDI_FAILURE);
		}
	}

	mutex_enter(&pwp->lock);
	return (DDI_SUCCESS);
}
1897
1898 /*
1899 * Reset a device or a logical unit.
1900 */
int
pmcs_reset_dev(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint64_t lun)
{
	int rval = 0;

	if (pptr == NULL) {
		return (ENXIO);
	}

	pmcs_lock_phy(pptr);
	if (pptr->dtype == SAS) {
		/*
		 * Some devices do not support SAS_I_T_NEXUS_RESET as
		 * it is not a mandatory (in SAM4) task management
		 * function, while LOGIC_UNIT_RESET is mandatory.
		 *
		 * The problem here is that we need to iterate over
		 * all known LUNs to emulate the semantics of
		 * "RESET_TARGET".
		 *
		 * XXX: FIX ME
		 */
		if (lun == (uint64_t)-1) {
			lun = 0;
		}
		rval = pmcs_ssp_tmf(pwp, pptr, SAS_LOGICAL_UNIT_RESET, 0, lun,
		    NULL);
	} else if (pptr->dtype == SATA) {
		/* SATA devices have no LUNs; only a whole-device reset. */
		if (lun != 0ull) {
			pmcs_unlock_phy(pptr);
			return (EINVAL);
		}
		rval = pmcs_reset_phy(pwp, pptr, PMCS_PHYOP_LINK_RESET);
	} else {
		pmcs_unlock_phy(pptr);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: cannot reset a SMP device yet (%s)",
		    __func__, pptr->path);
		return (EINVAL);
	}

	/*
	 * Now harvest any commands killed by this action
	 * by issuing an ABORT for all commands on this device.
	 *
	 * We do this even if the tmf or reset fails (in case there
	 * are any dead commands around to be harvested *anyway*).
	 * We don't have to await for the abort to complete.
	 */
	if (pmcs_abort(pwp, pptr, 0, 1, 0)) {
		pptr->abort_pending = 1;
		SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
	}

	pmcs_unlock_phy(pptr);
	return (rval);
}
1958
1959 /*
1960 * Called with PHY locked.
1961 */
1962 static int
pmcs_get_device_handle(pmcs_hw_t * pwp,pmcs_phy_t * pptr)1963 pmcs_get_device_handle(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
1964 {
1965 if (pptr->valid_device_id == 0) {
1966 int result = pmcs_register_device(pwp, pptr);
1967
1968 /*
1969 * If we changed while registering, punt
1970 */
1971 if (pptr->changed) {
1972 RESTART_DISCOVERY(pwp);
1973 return (-1);
1974 }
1975
1976 /*
1977 * If we had a failure to register, check against errors.
1978 * An ENOMEM error means we just retry (temp resource shortage).
1979 */
1980 if (result == ENOMEM) {
1981 PHY_CHANGED(pwp, pptr);
1982 RESTART_DISCOVERY(pwp);
1983 return (-1);
1984 }
1985
1986 /*
1987 * An ETIMEDOUT error means we retry (if our counter isn't
1988 * exhausted)
1989 */
1990 if (result == ETIMEDOUT) {
1991 if (ddi_get_lbolt() < pptr->config_stop) {
1992 PHY_CHANGED(pwp, pptr);
1993 RESTART_DISCOVERY(pwp);
1994 } else {
1995 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
1996 "%s: Retries exhausted for %s, killing",
1997 __func__, pptr->path);
1998 pptr->config_stop = 0;
1999 pmcs_kill_changed(pwp, pptr, 0);
2000 }
2001 return (-1);
2002 }
2003 /*
2004 * Other errors or no valid device id is fatal, but don't
2005 * preclude a future action.
2006 */
2007 if (result || pptr->valid_device_id == 0) {
2008 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
2009 "%s: %s could not be registered", __func__,
2010 pptr->path);
2011 return (-1);
2012 }
2013 }
2014 return (0);
2015 }
2016
2017 int
pmcs_iport_tgtmap_create(pmcs_iport_t * iport)2018 pmcs_iport_tgtmap_create(pmcs_iport_t *iport)
2019 {
2020 ASSERT(iport);
2021 if (iport == NULL)
2022 return (B_FALSE);
2023
2024 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__);
2025
2026 /* create target map */
2027 if (scsi_hba_tgtmap_create(iport->dip, SCSI_TM_FULLSET,
2028 tgtmap_csync_usec, tgtmap_stable_usec, (void *)iport,
2029 pmcs_tgtmap_activate_cb, pmcs_tgtmap_deactivate_cb,
2030 &iport->iss_tgtmap) != DDI_SUCCESS) {
2031 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG, NULL, NULL,
2032 "%s: failed to create tgtmap", __func__);
2033 return (B_FALSE);
2034 }
2035 return (B_TRUE);
2036 }
2037
2038 int
pmcs_iport_tgtmap_destroy(pmcs_iport_t * iport)2039 pmcs_iport_tgtmap_destroy(pmcs_iport_t *iport)
2040 {
2041 ASSERT(iport && iport->iss_tgtmap);
2042 if ((iport == NULL) || (iport->iss_tgtmap == NULL))
2043 return (B_FALSE);
2044
2045 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s", __func__);
2046
2047 /* destroy target map */
2048 scsi_hba_tgtmap_destroy(iport->iss_tgtmap);
2049 return (B_TRUE);
2050 }
2051
2052 /*
 * Remove all phys from an iport's phymap and empty its phylist.
2054 * Called when a port has been reset by the host (see pmcs_intr.c)
2055 * or prior to issuing a soft reset if we detect a stall on the chip
2056 * (see pmcs_attach.c).
2057 */
2058 void
pmcs_iport_teardown_phys(pmcs_iport_t * iport)2059 pmcs_iport_teardown_phys(pmcs_iport_t *iport)
2060 {
2061 pmcs_hw_t *pwp;
2062 sas_phymap_phys_t *phys;
2063 int phynum;
2064
2065 ASSERT(iport);
2066 ASSERT(mutex_owned(&iport->lock));
2067 pwp = iport->pwp;
2068 ASSERT(pwp);
2069
2070 /*
2071 * Remove all phys from the iport handle's phy list, unset its
2072 * primary phy and update its state.
2073 */
2074 pmcs_remove_phy_from_iport(iport, NULL);
2075 iport->pptr = NULL;
2076 iport->ua_state = UA_PEND_DEACTIVATE;
2077
2078 /* Remove all phys from the phymap */
2079 phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua);
2080 if (phys) {
2081 while ((phynum = sas_phymap_phys_next(phys)) != -1) {
2082 (void) sas_phymap_phy_rem(pwp->hss_phymap, phynum);
2083 }
2084 sas_phymap_phys_free(phys);
2085 }
2086 }
2087
2088 /*
2089 * Query the phymap and populate the iport handle passed in.
2090 * Called with iport lock held.
2091 */
int
pmcs_iport_configure_phys(pmcs_iport_t *iport)
{
	pmcs_hw_t *pwp;
	pmcs_phy_t *pptr;
	sas_phymap_phys_t *phys;
	int phynum;
	int inst;

	ASSERT(iport);
	ASSERT(mutex_owned(&iport->lock));
	pwp = iport->pwp;
	ASSERT(pwp);
	inst = ddi_get_instance(iport->dip);

	/* pwp->lock protects root_phys while we walk the phymap. */
	mutex_enter(&pwp->lock);
	ASSERT(pwp->root_phys != NULL);

	/*
	 * Query the phymap regarding the phys in this iport and populate
	 * the iport's phys list. Hereafter this list is maintained via
	 * port up and down events in pmcs_intr.c
	 */
	ASSERT(list_is_empty(&iport->phys));
	phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua);
	ASSERT(phys != NULL);
	while ((phynum = sas_phymap_phys_next(phys)) != -1) {
		/* Grab the phy pointer from root_phys */
		pptr = pwp->root_phys + phynum;
		ASSERT(pptr);
		pmcs_lock_phy(pptr);
		ASSERT(pptr->phynum == phynum);

		/*
		 * Set a back pointer in the phy to this iport.
		 */
		pptr->iport = iport;

		/*
		 * If this phy is the primary, set a pointer to it on our
		 * iport handle, and set our portid from it.
		 */
		if (!pptr->subsidiary) {
			iport->pptr = pptr;
			iport->portid = pptr->portid;
		}

		/*
		 * Finally, insert the phy into our list
		 */
		pmcs_unlock_phy(pptr);
		pmcs_add_phy_to_iport(iport, pptr);

		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: found "
		    "phy %d [0x%p] on iport%d, refcnt(%d)", __func__, phynum,
		    (void *)pptr, inst, iport->refcnt);
	}
	mutex_exit(&pwp->lock);
	sas_phymap_phys_free(phys);
	/* Kick discovery so the newly-populated iport gets configured. */
	RESTART_DISCOVERY(pwp);
	return (DDI_SUCCESS);
}
2154
2155 /*
2156 * Return the iport that ua is associated with, or NULL. If an iport is
2157 * returned, it will be held and the caller must release the hold.
2158 */
2159 static pmcs_iport_t *
pmcs_get_iport_by_ua(pmcs_hw_t * pwp,char * ua)2160 pmcs_get_iport_by_ua(pmcs_hw_t *pwp, char *ua)
2161 {
2162 pmcs_iport_t *iport = NULL;
2163
2164 rw_enter(&pwp->iports_lock, RW_READER);
2165 for (iport = list_head(&pwp->iports);
2166 iport != NULL;
2167 iport = list_next(&pwp->iports, iport)) {
2168 mutex_enter(&iport->lock);
2169 if (strcmp(iport->ua, ua) == 0) {
2170 mutex_exit(&iport->lock);
2171 pmcs_hold_iport(iport);
2172 break;
2173 }
2174 mutex_exit(&iport->lock);
2175 }
2176 rw_exit(&pwp->iports_lock);
2177
2178 return (iport);
2179 }
2180
2181 /*
2182 * Return the iport that pptr is associated with, or NULL.
2183 * If an iport is returned, there is a hold that the caller must release.
2184 */
2185 pmcs_iport_t *
pmcs_get_iport_by_wwn(pmcs_hw_t * pwp,uint64_t wwn)2186 pmcs_get_iport_by_wwn(pmcs_hw_t *pwp, uint64_t wwn)
2187 {
2188 pmcs_iport_t *iport = NULL;
2189 char *ua;
2190
2191 ua = sas_phymap_lookup_ua(pwp->hss_phymap, pwp->sas_wwns[0], wwn);
2192 if (ua) {
2193 iport = pmcs_get_iport_by_ua(pwp, ua);
2194 if (iport) {
2195 mutex_enter(&iport->lock);
2196 pmcs_iport_active(iport);
2197 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: "
2198 "found iport [0x%p] on ua (%s), refcnt (%d)",
2199 __func__, (void *)iport, ua, iport->refcnt);
2200 mutex_exit(&iport->lock);
2201 }
2202 }
2203
2204 return (iport);
2205 }
2206
2207 /*
2208 * Promote the next phy on this port to primary, and return it.
2209 * Called when the primary PHY on a port is going down, but the port
2210 * remains up (see pmcs_intr.c).
2211 */
2212 pmcs_phy_t *
pmcs_promote_next_phy(pmcs_phy_t * prev_primary)2213 pmcs_promote_next_phy(pmcs_phy_t *prev_primary)
2214 {
2215 pmcs_hw_t *pwp;
2216 pmcs_iport_t *iport;
2217 pmcs_phy_t *pptr, *child;
2218 int portid;
2219
2220 pmcs_lock_phy(prev_primary);
2221 portid = prev_primary->portid;
2222 iport = prev_primary->iport;
2223 pwp = prev_primary->pwp;
2224
2225 /* Use the first available phy in this port */
2226 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
2227 if ((pptr->portid == portid) && (pptr != prev_primary)) {
2228 mutex_enter(&pptr->phy_lock);
2229 break;
2230 }
2231 }
2232
2233 if (pptr == NULL) {
2234 pmcs_unlock_phy(prev_primary);
2235 return (NULL);
2236 }
2237
2238 if (iport) {
2239 mutex_enter(&iport->lock);
2240 iport->pptr = pptr;
2241 mutex_exit(&iport->lock);
2242 }
2243
2244 /* Update the phy handle with the data from the previous primary */
2245 pptr->children = prev_primary->children;
2246 child = pptr->children;
2247 while (child) {
2248 child->parent = pptr;
2249 child = child->sibling;
2250 }
2251 pptr->ncphy = prev_primary->ncphy;
2252 pptr->width = prev_primary->width;
2253 pptr->dtype = prev_primary->dtype;
2254 pptr->pend_dtype = prev_primary->pend_dtype;
2255 pptr->tolerates_sas2 = prev_primary->tolerates_sas2;
2256 pptr->atdt = prev_primary->atdt;
2257 pptr->portid = prev_primary->portid;
2258 pptr->link_rate = prev_primary->link_rate;
2259 pptr->configured = prev_primary->configured;
2260 pptr->iport = prev_primary->iport;
2261 pptr->target = prev_primary->target;
2262 if (pptr->target) {
2263 pptr->target->phy = pptr;
2264 }
2265
2266 /* Update the phy mask properties for the affected PHYs */
2267 /* Clear the current values... */
2268 pmcs_update_phy_pm_props(pptr, pptr->att_port_pm_tmp,
2269 pptr->tgt_port_pm_tmp, B_FALSE);
2270 /* ...replace with the values from prev_primary... */
2271 pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm_tmp,
2272 prev_primary->tgt_port_pm_tmp, B_TRUE);
2273 /* ...then clear prev_primary's PHY values from the new primary */
2274 pmcs_update_phy_pm_props(pptr, prev_primary->att_port_pm,
2275 prev_primary->tgt_port_pm, B_FALSE);
2276 /* Clear the prev_primary's values */
2277 pmcs_update_phy_pm_props(prev_primary, prev_primary->att_port_pm_tmp,
2278 prev_primary->tgt_port_pm_tmp, B_FALSE);
2279
2280 pptr->subsidiary = 0;
2281
2282 prev_primary->subsidiary = 1;
2283 prev_primary->children = NULL;
2284 prev_primary->target = NULL;
2285 pptr->device_id = prev_primary->device_id;
2286 pptr->valid_device_id = prev_primary->valid_device_id;
2287 pmcs_unlock_phy(prev_primary);
2288
2289 /*
2290 * We call pmcs_unlock_phy() on pptr because it now contains the
2291 * list of children.
2292 */
2293 pmcs_unlock_phy(pptr);
2294
2295 return (pptr);
2296 }
2297
2298 void
pmcs_hold_iport(pmcs_iport_t * iport)2299 pmcs_hold_iport(pmcs_iport_t *iport)
2300 {
2301 /*
2302 * Grab a reference to this iport.
2303 */
2304 ASSERT(iport);
2305 mutex_enter(&iport->refcnt_lock);
2306 iport->refcnt++;
2307 mutex_exit(&iport->refcnt_lock);
2308
2309 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: iport "
2310 "[0x%p] refcnt (%d)", __func__, (void *)iport, iport->refcnt);
2311 }
2312
2313 void
pmcs_rele_iport(pmcs_iport_t * iport)2314 pmcs_rele_iport(pmcs_iport_t *iport)
2315 {
2316 /*
2317 * Release a refcnt on this iport. If this is the last reference,
2318 * signal the potential waiter in pmcs_iport_unattach().
2319 */
2320 ASSERT(iport->refcnt > 0);
2321 mutex_enter(&iport->refcnt_lock);
2322 iport->refcnt--;
2323 mutex_exit(&iport->refcnt_lock);
2324 if (iport->refcnt == 0) {
2325 cv_signal(&iport->refcnt_cv);
2326 }
2327 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: iport "
2328 "[0x%p] refcnt (%d)", __func__, (void *)iport, iport->refcnt);
2329 }
2330
2331 void
pmcs_phymap_activate(void * arg,char * ua,void ** privp)2332 pmcs_phymap_activate(void *arg, char *ua, void **privp)
2333 {
2334 _NOTE(ARGUNUSED(privp));
2335 pmcs_hw_t *pwp = arg;
2336 pmcs_iport_t *iport = NULL;
2337
2338 mutex_enter(&pwp->lock);
2339 if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD) ||
2340 (pwp->state == STATE_IN_RESET)) {
2341 mutex_exit(&pwp->lock);
2342 return;
2343 }
2344 pwp->phymap_active++;
2345 mutex_exit(&pwp->lock);
2346
2347 if (scsi_hba_iportmap_iport_add(pwp->hss_iportmap, ua, NULL) !=
2348 DDI_SUCCESS) {
2349 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to "
2350 "add iport handle on unit address [%s]", __func__, ua);
2351 } else {
2352 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: "
2353 "phymap_active count (%d), added iport handle on unit "
2354 "address [%s]", __func__, pwp->phymap_active, ua);
2355 }
2356
2357 /* Set the HBA softstate as our private data for this unit address */
2358 *privp = (void *)pwp;
2359
2360 /*
2361 * We are waiting on attach for this iport node, unless it is still
2362 * attached. This can happen if a consumer has an outstanding open
2363 * on our iport node, but the port is down. If this is the case, we
2364 * need to configure our iport here for reuse.
2365 */
2366 iport = pmcs_get_iport_by_ua(pwp, ua);
2367 if (iport) {
2368 mutex_enter(&iport->lock);
2369 if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) {
2370 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: "
2371 "failed to configure phys on iport [0x%p] at "
2372 "unit address (%s)", __func__, (void *)iport, ua);
2373 }
2374 pmcs_iport_active(iport);
2375 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
2376 &iport->nphy);
2377 mutex_exit(&iport->lock);
2378 pmcs_rele_iport(iport);
2379 }
2380
2381 }
2382
2383 void
pmcs_phymap_deactivate(void * arg,char * ua,void * privp)2384 pmcs_phymap_deactivate(void *arg, char *ua, void *privp)
2385 {
2386 _NOTE(ARGUNUSED(privp));
2387 pmcs_hw_t *pwp = arg;
2388 pmcs_iport_t *iport;
2389
2390 mutex_enter(&pwp->lock);
2391 pwp->phymap_active--;
2392 mutex_exit(&pwp->lock);
2393
2394 if (scsi_hba_iportmap_iport_remove(pwp->hss_iportmap, ua) !=
2395 DDI_SUCCESS) {
2396 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: failed to "
2397 "remove iport handle on unit address [%s]", __func__, ua);
2398 } else {
2399 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL, "%s: "
2400 "phymap_active count (%d), removed iport handle on unit "
2401 "address [%s]", __func__, pwp->phymap_active, ua);
2402 }
2403
2404 iport = pmcs_get_iport_by_ua(pwp, ua);
2405
2406 if (iport == NULL) {
2407 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "%s: failed "
2408 "lookup of iport handle on unit addr (%s)", __func__, ua);
2409 return;
2410 }
2411
2412 mutex_enter(&iport->lock);
2413 iport->ua_state = UA_INACTIVE;
2414 iport->portid = PMCS_IPORT_INVALID_PORT_ID;
2415 pmcs_remove_phy_from_iport(iport, NULL);
2416 mutex_exit(&iport->lock);
2417 pmcs_rele_iport(iport);
2418 }
2419
2420 /*
2421 * Top-level discovery function
2422 */
2423 void
pmcs_discover(pmcs_hw_t * pwp)2424 pmcs_discover(pmcs_hw_t *pwp)
2425 {
2426 pmcs_phy_t *pptr;
2427 pmcs_phy_t *root_phy;
2428
2429 DTRACE_PROBE2(pmcs__discover__entry, ulong_t, pwp->work_flags,
2430 boolean_t, pwp->config_changed);
2431
2432 mutex_enter(&pwp->lock);
2433
2434 if (pwp->state != STATE_RUNNING) {
2435 mutex_exit(&pwp->lock);
2436 return;
2437 }
2438
2439 /* Ensure we have at least one phymap active */
2440 if (pwp->phymap_active == 0) {
2441 mutex_exit(&pwp->lock);
2442 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2443 "%s: phymap inactive, exiting", __func__);
2444 return;
2445 }
2446
2447 mutex_exit(&pwp->lock);
2448
2449 /*
2450 * If no iports have attached, but we have PHYs that are up, we
2451 * are waiting for iport attach to complete. Restart discovery.
2452 */
2453 rw_enter(&pwp->iports_lock, RW_READER);
2454 if (!pwp->iports_attached) {
2455 rw_exit(&pwp->iports_lock);
2456 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2457 "%s: no iports attached, retry discovery", __func__);
2458 SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
2459 return;
2460 }
2461 rw_exit(&pwp->iports_lock);
2462
2463 mutex_enter(&pwp->config_lock);
2464 if (pwp->configuring) {
2465 mutex_exit(&pwp->config_lock);
2466 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2467 "%s: configuration already in progress", __func__);
2468 return;
2469 }
2470
2471 if (pmcs_acquire_scratch(pwp, B_FALSE)) {
2472 mutex_exit(&pwp->config_lock);
2473 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2474 "%s: cannot allocate scratch", __func__);
2475 SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
2476 return;
2477 }
2478
2479 pwp->configuring = 1;
2480 pwp->config_changed = B_FALSE;
2481 mutex_exit(&pwp->config_lock);
2482
2483 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery begin");
2484
2485 /*
2486 * First, tell SCSA that we're beginning set operations.
2487 */
2488 pmcs_begin_observations(pwp);
2489
2490 /*
2491 * The order of the following traversals is important.
2492 *
2493 * The first one checks for changed expanders.
2494 *
2495 * The second one aborts commands for dead devices and deregisters them.
2496 *
2497 * The third one clears the contents of dead expanders from the tree
2498 *
2499 * The fourth one clears now dead devices in expanders that remain.
2500 */
2501
2502 /*
2503 * 1. Check expanders marked changed (but not dead) to see if they still
2504 * have the same number of phys and the same SAS address. Mark them,
2505 * their subsidiary phys (if wide) and their descendents dead if
2506 * anything has changed. Check the devices they contain to see if
2507 * *they* have changed. If they've changed from type NOTHING we leave
2508 * them marked changed to be configured later (picking up a new SAS
2509 * address and link rate if possible). Otherwise, any change in type,
2510 * SAS address or removal of target role will cause us to mark them
2511 * (and their descendents) as dead (and cause any pending commands
2512 * and associated devices to be removed).
2513 *
2514 * NOTE: We don't want to bail on discovery if the config has
2515 * changed until *after* we run pmcs_kill_devices.
2516 */
2517 root_phy = pwp->root_phys;
2518 pmcs_check_expanders(pwp, root_phy);
2519
2520 /*
2521 * 2. Descend the tree looking for dead devices and kill them
2522 * by aborting all active commands and then deregistering them.
2523 */
2524 if (pmcs_kill_devices(pwp, root_phy)) {
2525 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2526 "%s: pmcs_kill_devices failed!", __func__);
2527 }
2528
2529 /*
2530 * 3. Check for dead expanders and remove their children from the tree.
2531 * By the time we get here, the devices and commands for them have
2532 * already been terminated and removed.
2533 *
2534 * We do this independent of the configuration count changing so we can
2535 * free any dead device PHYs that were discovered while checking
2536 * expanders. We ignore any subsidiary phys as pmcs_clear_expander
2537 * will take care of those.
2538 *
2539 * NOTE: pmcs_clear_expander requires softstate lock
2540 */
2541 mutex_enter(&pwp->lock);
2542 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
2543 /*
2544 * Call pmcs_clear_expander for every root PHY. It will
2545 * recurse and determine which (if any) expanders actually
2546 * need to be cleared.
2547 */
2548 pmcs_lock_phy(pptr);
2549 pmcs_clear_expander(pwp, pptr, 0);
2550 pmcs_unlock_phy(pptr);
2551 }
2552 mutex_exit(&pwp->lock);
2553
2554 /*
2555 * 4. Check for dead devices and nullify them. By the time we get here,
2556 * the devices and commands for them have already been terminated
2557 * and removed. This is different from step 2 in that this just nulls
2558 * phys that are part of expanders that are still here but used to
2559 * be something but are no longer something (e.g., after a pulled
2560 * disk drive). Note that dead expanders had their contained phys
2561 * removed from the tree- here, the expanders themselves are
2562 * nullified (unless they were removed by being contained in another
2563 * expander phy).
2564 */
2565 pmcs_clear_phys(pwp, root_phy);
2566
2567 /*
2568 * 5. Now check for and configure new devices.
2569 */
2570 if (pmcs_configure_new_devices(pwp, root_phy)) {
2571 goto restart;
2572 }
2573
2574 DTRACE_PROBE2(pmcs__discover__exit, ulong_t, pwp->work_flags,
2575 boolean_t, pwp->config_changed);
2576 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, "Discovery end");
2577
2578 mutex_enter(&pwp->config_lock);
2579
2580 if (pwp->config_changed == B_FALSE) {
2581 /*
2582 * Observation is stable, report what we currently see to
2583 * the tgtmaps for delta processing. Start by setting
2584 * BEGIN on all tgtmaps.
2585 */
2586 mutex_exit(&pwp->config_lock);
2587 if (pmcs_report_observations(pwp) == B_FALSE) {
2588 goto restart;
2589 }
2590 mutex_enter(&pwp->config_lock);
2591 } else {
2592 /*
2593 * If config_changed is TRUE, we need to reschedule
2594 * discovery now.
2595 */
2596 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2597 "%s: Config has changed, will re-run discovery", __func__);
2598 SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
2599 }
2600
2601 pmcs_release_scratch(pwp);
2602 if (!pwp->quiesced) {
2603 pwp->blocked = 0;
2604 }
2605 pwp->configuring = 0;
2606 cv_signal(&pwp->config_cv);
2607 mutex_exit(&pwp->config_lock);
2608
2609 #ifdef DEBUG
2610 pptr = pmcs_find_phy_needing_work(pwp, pwp->root_phys);
2611 if (pptr != NULL) {
2612 if (!WORK_IS_SCHEDULED(pwp, PMCS_WORK_DISCOVER)) {
2613 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
2614 "PHY %s dead=%d changed=%d configured=%d "
2615 "but no work scheduled", pptr->path, pptr->dead,
2616 pptr->changed, pptr->configured);
2617 }
2618 pmcs_unlock_phy(pptr);
2619 }
2620 #endif
2621
2622 return;
2623
2624 restart:
2625 /* Clean up and restart discovery */
2626 pmcs_release_scratch(pwp);
2627 pmcs_flush_observations(pwp);
2628 mutex_enter(&pwp->config_lock);
2629 pwp->configuring = 0;
2630 cv_signal(&pwp->config_cv);
2631 RESTART_DISCOVERY_LOCKED(pwp);
2632 mutex_exit(&pwp->config_lock);
2633 }
2634
2635 #ifdef DEBUG
2636 /*
2637 * Return any PHY that needs to have scheduled work done. The PHY is returned
2638 * locked.
2639 */
2640 static pmcs_phy_t *
pmcs_find_phy_needing_work(pmcs_hw_t * pwp,pmcs_phy_t * pptr)2641 pmcs_find_phy_needing_work(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
2642 {
2643 pmcs_phy_t *cphyp, *pnext;
2644
2645 while (pptr) {
2646 pmcs_lock_phy(pptr);
2647
2648 if (pptr->changed || (pptr->dead && pptr->valid_device_id)) {
2649 return (pptr);
2650 }
2651
2652 pnext = pptr->sibling;
2653
2654 if (pptr->children) {
2655 cphyp = pptr->children;
2656 pmcs_unlock_phy(pptr);
2657 cphyp = pmcs_find_phy_needing_work(pwp, cphyp);
2658 if (cphyp) {
2659 return (cphyp);
2660 }
2661 } else {
2662 pmcs_unlock_phy(pptr);
2663 }
2664
2665 pptr = pnext;
2666 }
2667
2668 return (NULL);
2669 }
2670 #endif /* DEBUG */
2671
2672 /*
2673 * We may (or may not) report observations to SCSA. This is prefaced by
2674 * issuing a set_begin for each iport target map.
2675 */
2676 static void
pmcs_begin_observations(pmcs_hw_t * pwp)2677 pmcs_begin_observations(pmcs_hw_t *pwp)
2678 {
2679 pmcs_iport_t *iport;
2680 scsi_hba_tgtmap_t *tgtmap;
2681
2682 rw_enter(&pwp->iports_lock, RW_READER);
2683 for (iport = list_head(&pwp->iports); iport != NULL;
2684 iport = list_next(&pwp->iports, iport)) {
2685 /*
2686 * Unless we have at least one phy up, skip this iport.
2687 * Note we don't need to lock the iport for report_skip
2688 * since it is only used here. We are doing the skip so that
2689 * the phymap and iportmap stabilization times are honored -
2690 * giving us the ability to recover port operation within the
2691 * stabilization time without unconfiguring targets using the
2692 * port.
2693 */
2694 if (!sas_phymap_uahasphys(pwp->hss_phymap, iport->ua)) {
2695 iport->report_skip = 1;
2696 continue; /* skip set_begin */
2697 }
2698 iport->report_skip = 0;
2699
2700 tgtmap = iport->iss_tgtmap;
2701 ASSERT(tgtmap);
2702 if (scsi_hba_tgtmap_set_begin(tgtmap) != DDI_SUCCESS) {
2703 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
2704 "%s: cannot set_begin tgtmap ", __func__);
2705 rw_exit(&pwp->iports_lock);
2706 return;
2707 }
2708 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
2709 "%s: set begin on tgtmap [0x%p]", __func__, (void *)tgtmap);
2710 }
2711 rw_exit(&pwp->iports_lock);
2712 }
2713
2714 /*
2715 * Tell SCSA to flush the observations we've already sent (if any), as they
2716 * are no longer valid.
2717 */
2718 static void
pmcs_flush_observations(pmcs_hw_t * pwp)2719 pmcs_flush_observations(pmcs_hw_t *pwp)
2720 {
2721 pmcs_iport_t *iport;
2722 scsi_hba_tgtmap_t *tgtmap;
2723
2724 rw_enter(&pwp->iports_lock, RW_READER);
2725 for (iport = list_head(&pwp->iports); iport != NULL;
2726 iport = list_next(&pwp->iports, iport)) {
2727 /*
2728 * Skip this iport if it has no PHYs up.
2729 */
2730 if (!sas_phymap_uahasphys(pwp->hss_phymap, iport->ua)) {
2731 continue;
2732 }
2733
2734 tgtmap = iport->iss_tgtmap;
2735 ASSERT(tgtmap);
2736 if (scsi_hba_tgtmap_set_flush(tgtmap) != DDI_SUCCESS) {
2737 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
2738 "%s: Failed set_flush on tgtmap 0x%p", __func__,
2739 (void *)tgtmap);
2740 } else {
2741 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
2742 "%s: set flush on tgtmap 0x%p", __func__,
2743 (void *)tgtmap);
2744 }
2745 }
2746 rw_exit(&pwp->iports_lock);
2747 }
2748
2749 /*
2750 * Report current observations to SCSA.
2751 */
2752 static boolean_t
pmcs_report_observations(pmcs_hw_t * pwp)2753 pmcs_report_observations(pmcs_hw_t *pwp)
2754 {
2755 pmcs_iport_t *iport;
2756 scsi_hba_tgtmap_t *tgtmap;
2757 char *ap;
2758 pmcs_phy_t *pptr;
2759 uint64_t wwn;
2760
2761 /*
2762 * Observation is stable, report what we currently see to the tgtmaps
2763 * for delta processing.
2764 */
2765 pptr = pwp->root_phys;
2766
2767 while (pptr) {
2768 pmcs_lock_phy(pptr);
2769
2770 /*
2771 * Skip PHYs that have nothing attached or are dead.
2772 */
2773 if ((pptr->dtype == NOTHING) || pptr->dead) {
2774 pmcs_unlock_phy(pptr);
2775 pptr = pptr->sibling;
2776 continue;
2777 }
2778
2779 if (pptr->changed) {
2780 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
2781 "%s: oops, PHY %s changed; restart discovery",
2782 __func__, pptr->path);
2783 pmcs_unlock_phy(pptr);
2784 return (B_FALSE);
2785 }
2786
2787 /*
2788 * Get the iport for this root PHY, then call the helper
2789 * to report observations for this iport's targets
2790 */
2791 wwn = pmcs_barray2wwn(pptr->sas_address);
2792 pmcs_unlock_phy(pptr);
2793 iport = pmcs_get_iport_by_wwn(pwp, wwn);
2794 if (iport == NULL) {
2795 /* No iport for this tgt */
2796 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2797 "%s: no iport for this target", __func__);
2798 pptr = pptr->sibling;
2799 continue;
2800 }
2801
2802 pmcs_lock_phy(pptr);
2803 if (!iport->report_skip) {
2804 if (pmcs_report_iport_observations(
2805 pwp, iport, pptr) == B_FALSE) {
2806 pmcs_rele_iport(iport);
2807 pmcs_unlock_phy(pptr);
2808 return (B_FALSE);
2809 }
2810 }
2811 pmcs_rele_iport(iport);
2812 pmcs_unlock_phy(pptr);
2813 pptr = pptr->sibling;
2814 }
2815
2816 /*
2817 * The observation is complete, end sets. Note we will skip any
2818 * iports that are active, but have no PHYs in them (i.e. awaiting
2819 * unconfigure). Set to restart discovery if we find this.
2820 */
2821 rw_enter(&pwp->iports_lock, RW_READER);
2822 for (iport = list_head(&pwp->iports);
2823 iport != NULL;
2824 iport = list_next(&pwp->iports, iport)) {
2825
2826 if (iport->report_skip)
2827 continue; /* skip set_end */
2828
2829 tgtmap = iport->iss_tgtmap;
2830 ASSERT(tgtmap);
2831 if (scsi_hba_tgtmap_set_end(tgtmap, 0) != DDI_SUCCESS) {
2832 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
2833 "%s: cannot set_end tgtmap ", __func__);
2834 rw_exit(&pwp->iports_lock);
2835 return (B_FALSE);
2836 }
2837 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
2838 "%s: set end on tgtmap [0x%p]", __func__, (void *)tgtmap);
2839 }
2840
2841 /*
2842 * Now that discovery is complete, set up the necessary
2843 * DDI properties on each iport node.
2844 */
2845 for (iport = list_head(&pwp->iports); iport != NULL;
2846 iport = list_next(&pwp->iports, iport)) {
2847 /* Set up the 'attached-port' property on the iport */
2848 ap = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP);
2849 mutex_enter(&iport->lock);
2850 pptr = iport->pptr;
2851 mutex_exit(&iport->lock);
2852 if (pptr == NULL) {
2853 /*
2854 * This iport is down, but has not been
2855 * removed from our list (unconfigured).
2856 * Set our value to '0'.
2857 */
2858 (void) snprintf(ap, 1, "%s", "0");
2859 } else {
2860 /* Otherwise, set it to remote phy's wwn */
2861 pmcs_lock_phy(pptr);
2862 wwn = pmcs_barray2wwn(pptr->sas_address);
2863 (void) scsi_wwn_to_wwnstr(wwn, 1, ap);
2864 pmcs_unlock_phy(pptr);
2865 }
2866 if (ndi_prop_update_string(DDI_DEV_T_NONE, iport->dip,
2867 SCSI_ADDR_PROP_ATTACHED_PORT, ap) != DDI_SUCCESS) {
2868 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed "
2869 "to set prop ("SCSI_ADDR_PROP_ATTACHED_PORT")",
2870 __func__);
2871 }
2872 kmem_free(ap, PMCS_MAX_UA_SIZE);
2873 }
2874 rw_exit(&pwp->iports_lock);
2875
2876 return (B_TRUE);
2877 }
2878
2879 /*
2880 * Report observations into a particular iport's target map
2881 *
2882 * Called with phyp (and all descendents) locked
2883 */
2884 static boolean_t
pmcs_report_iport_observations(pmcs_hw_t * pwp,pmcs_iport_t * iport,pmcs_phy_t * phyp)2885 pmcs_report_iport_observations(pmcs_hw_t *pwp, pmcs_iport_t *iport,
2886 pmcs_phy_t *phyp)
2887 {
2888 pmcs_phy_t *lphyp;
2889 scsi_hba_tgtmap_t *tgtmap;
2890 scsi_tgtmap_tgt_type_t tgt_type;
2891 char *ua;
2892 uint64_t wwn;
2893
2894 tgtmap = iport->iss_tgtmap;
2895 ASSERT(tgtmap);
2896
2897 lphyp = phyp;
2898 while (lphyp) {
2899 switch (lphyp->dtype) {
2900 default: /* Skip unknown PHYs. */
2901 /* for non-root phys, skip to sibling */
2902 goto next_phy;
2903
2904 case SATA:
2905 case SAS:
2906 tgt_type = SCSI_TGT_SCSI_DEVICE;
2907 break;
2908
2909 case EXPANDER:
2910 tgt_type = SCSI_TGT_SMP_DEVICE;
2911 break;
2912 }
2913
2914 if (lphyp->dead || !lphyp->configured) {
2915 goto next_phy;
2916 }
2917
2918 /*
2919 * Validate the PHY's SAS address
2920 */
2921 if (((lphyp->sas_address[0] & 0xf0) >> 4) != NAA_IEEE_REG) {
2922 pmcs_prt(pwp, PMCS_PRT_ERR, lphyp, NULL,
2923 "PHY 0x%p (%s) has invalid SAS address; "
2924 "will not enumerate", (void *)lphyp, lphyp->path);
2925 goto next_phy;
2926 }
2927
2928 wwn = pmcs_barray2wwn(lphyp->sas_address);
2929 ua = scsi_wwn_to_wwnstr(wwn, 1, NULL);
2930
2931 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, lphyp, NULL,
2932 "iport_observation: adding %s on tgtmap [0x%p] phy [0x%p]",
2933 ua, (void *)tgtmap, (void*)lphyp);
2934
2935 if (scsi_hba_tgtmap_set_add(tgtmap, tgt_type, ua, NULL) !=
2936 DDI_SUCCESS) {
2937 pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, NULL, NULL,
2938 "%s: failed to add address %s", __func__, ua);
2939 scsi_free_wwnstr(ua);
2940 return (B_FALSE);
2941 }
2942 scsi_free_wwnstr(ua);
2943
2944 if (lphyp->children) {
2945 if (pmcs_report_iport_observations(pwp, iport,
2946 lphyp->children) == B_FALSE) {
2947 return (B_FALSE);
2948 }
2949 }
2950
2951 /* for non-root phys, report siblings too */
2952 next_phy:
2953 if (IS_ROOT_PHY(lphyp)) {
2954 lphyp = NULL;
2955 } else {
2956 lphyp = lphyp->sibling;
2957 }
2958 }
2959
2960 return (B_TRUE);
2961 }
2962
2963 /*
2964 * Check for and configure new devices.
2965 *
2966 * If the changed device is a SATA device, add a SATA device.
2967 *
2968 * If the changed device is a SAS device, add a SAS device.
2969 *
2970 * If the changed device is an EXPANDER device, do a REPORT
2971 * GENERAL SMP command to find out the number of contained phys.
2972 *
2973 * For each number of contained phys, allocate a phy, do a
2974 * DISCOVERY SMP command to find out what kind of device it
2975 * is and add it to the linked list of phys on the *next* level.
2976 *
2977 * NOTE: pptr passed in by the caller will be a root PHY
2978 */
2979 static int
pmcs_configure_new_devices(pmcs_hw_t * pwp,pmcs_phy_t * pptr)2980 pmcs_configure_new_devices(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
2981 {
2982 int rval = 0;
2983 pmcs_iport_t *iport;
2984 pmcs_phy_t *pnext, *orig_pptr = pptr, *root_phy, *pchild;
2985 uint64_t wwn;
2986
2987 /*
2988 * First, walk through each PHY at this level
2989 */
2990 while (pptr) {
2991 pmcs_lock_phy(pptr);
2992 pnext = pptr->sibling;
2993
2994 /*
2995 * Set the new dtype if it has changed
2996 */
2997 if ((pptr->pend_dtype != NEW) &&
2998 (pptr->pend_dtype != pptr->dtype)) {
2999 pptr->dtype = pptr->pend_dtype;
3000 }
3001
3002 if (pptr->changed == 0 || pptr->dead || pptr->configured) {
3003 goto next_phy;
3004 }
3005
3006 /* Confirm that this iport is configured */
3007 root_phy = pmcs_get_root_phy(pptr);
3008 wwn = pmcs_barray2wwn(root_phy->sas_address);
3009 pmcs_unlock_phy(pptr);
3010 iport = pmcs_get_iport_by_wwn(pwp, wwn);
3011 if (iport == NULL) {
3012 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
3013 "%s: iport not yet configured, "
3014 "retry discovery", __func__);
3015 pnext = NULL;
3016 rval = -1;
3017 pmcs_lock_phy(pptr);
3018 goto next_phy;
3019 }
3020
3021 pmcs_lock_phy(pptr);
3022 switch (pptr->dtype) {
3023 case NOTHING:
3024 pptr->changed = 0;
3025 break;
3026 case SATA:
3027 case SAS:
3028 pptr->iport = iport;
3029 pmcs_new_tport(pwp, pptr);
3030 break;
3031 case EXPANDER:
3032 pmcs_configure_expander(pwp, pptr, iport);
3033 break;
3034 }
3035 pmcs_rele_iport(iport);
3036
3037 mutex_enter(&pwp->config_lock);
3038 if (pwp->config_changed) {
3039 mutex_exit(&pwp->config_lock);
3040 pnext = NULL;
3041 goto next_phy;
3042 }
3043 mutex_exit(&pwp->config_lock);
3044
3045 next_phy:
3046 pmcs_unlock_phy(pptr);
3047 pptr = pnext;
3048 }
3049
3050 if (rval != 0) {
3051 return (rval);
3052 }
3053
3054 /*
3055 * Now walk through each PHY again, recalling ourselves if they
3056 * have children
3057 */
3058 pptr = orig_pptr;
3059 while (pptr) {
3060 pmcs_lock_phy(pptr);
3061 pnext = pptr->sibling;
3062 pchild = pptr->children;
3063 pmcs_unlock_phy(pptr);
3064
3065 if (pchild) {
3066 rval = pmcs_configure_new_devices(pwp, pchild);
3067 if (rval != 0) {
3068 break;
3069 }
3070 }
3071
3072 pptr = pnext;
3073 }
3074
3075 return (rval);
3076 }
3077
3078 /*
3079 * Set all phys and descendent phys as changed if changed == B_TRUE, otherwise
3080 * mark them all as not changed.
3081 *
3082 * Called with parent PHY locked.
3083 */
3084 void
pmcs_set_changed(pmcs_hw_t * pwp,pmcs_phy_t * parent,boolean_t changed,int level)3085 pmcs_set_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, boolean_t changed,
3086 int level)
3087 {
3088 pmcs_phy_t *pptr;
3089
3090 if (level == 0) {
3091 if (changed) {
3092 PHY_CHANGED(pwp, parent);
3093 } else {
3094 parent->changed = 0;
3095 }
3096 if (parent->dtype == EXPANDER && parent->level) {
3097 parent->width = 1;
3098 }
3099 if (parent->children) {
3100 pmcs_set_changed(pwp, parent->children, changed,
3101 level + 1);
3102 }
3103 } else {
3104 pptr = parent;
3105 while (pptr) {
3106 if (changed) {
3107 PHY_CHANGED(pwp, pptr);
3108 } else {
3109 pptr->changed = 0;
3110 }
3111 if (pptr->dtype == EXPANDER && pptr->level) {
3112 pptr->width = 1;
3113 }
3114 if (pptr->children) {
3115 pmcs_set_changed(pwp, pptr->children, changed,
3116 level + 1);
3117 }
3118 pptr = pptr->sibling;
3119 }
3120 }
3121 }
3122
3123 /*
3124 * Take the passed phy mark it and its descendants as dead.
3125 * Fire up reconfiguration to abort commands and bury it.
3126 *
3127 * Called with the parent PHY locked.
3128 */
3129 void
pmcs_kill_changed(pmcs_hw_t * pwp,pmcs_phy_t * parent,int level)3130 pmcs_kill_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, int level)
3131 {
3132 pmcs_phy_t *pptr = parent;
3133
3134 while (pptr) {
3135 pptr->link_rate = 0;
3136 pptr->abort_sent = 0;
3137 pptr->abort_pending = 1;
3138 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
3139 pptr->need_rl_ext = 0;
3140
3141 if (pptr->dead == 0) {
3142 PHY_CHANGED(pwp, pptr);
3143 RESTART_DISCOVERY(pwp);
3144 }
3145
3146 pptr->dead = 1;
3147
3148 if (pptr->children) {
3149 pmcs_kill_changed(pwp, pptr->children, level + 1);
3150 }
3151
3152 /*
3153 * Only kill siblings at level > 0
3154 */
3155 if (level == 0) {
3156 return;
3157 }
3158
3159 pptr = pptr->sibling;
3160 }
3161 }
3162
3163 /*
3164 * Go through every PHY and clear any that are dead (unless they're expanders)
3165 */
3166 static void
pmcs_clear_phys(pmcs_hw_t * pwp,pmcs_phy_t * pptr)3167 pmcs_clear_phys(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
3168 {
3169 pmcs_phy_t *pnext, *phyp;
3170
3171 phyp = pptr;
3172 while (phyp) {
3173 if (IS_ROOT_PHY(phyp)) {
3174 pmcs_lock_phy(phyp);
3175 }
3176
3177 if ((phyp->dtype != EXPANDER) && phyp->dead) {
3178 pmcs_clear_phy(pwp, phyp);
3179 }
3180
3181 if (phyp->children) {
3182 pmcs_clear_phys(pwp, phyp->children);
3183 }
3184
3185 pnext = phyp->sibling;
3186
3187 if (IS_ROOT_PHY(phyp)) {
3188 pmcs_unlock_phy(phyp);
3189 }
3190
3191 phyp = pnext;
3192 }
3193 }
3194
/*
 * Clear volatile parts of a phy.  Called with PHY locked.
 *
 * Resets the discovery, configuration and recovery state of pptr while
 * preserving the fields that tie it into the PHY tree (sibling, children,
 * parent), its identity (phynum, SAS address, path) and its reference
 * count.  The "keep xxx" comments below deliberately track the pmcs_phy_t
 * fields that are left untouched.
 */
void
pmcs_clear_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: %s",
	    __func__, pptr->path);
	ASSERT(mutex_owned(&pptr->phy_lock));
	/* keep sibling */
	/* keep children */
	/* keep parent */
	pptr->device_id = PMCS_INVALID_DEVICE_ID;
	/* keep hw_event_ack */
	pptr->ncphy = 0;
	/* keep phynum */
	pptr->width = 0;
	pptr->ds_recovery_retries = 0;
	pptr->ds_prev_good_recoveries = 0;
	pptr->last_good_recovery = 0;
	pptr->prev_recovery = 0;

	/* keep dtype */
	pptr->config_stop = 0;
	pptr->spinup_hold = 0;
	pptr->atdt = 0;
	/* keep portid */
	pptr->link_rate = 0;
	pptr->valid_device_id = 0;
	pptr->abort_sent = 0;
	pptr->abort_pending = 0;
	pptr->need_rl_ext = 0;
	pptr->subsidiary = 0;
	pptr->configured = 0;
	pptr->deregister_wait = 0;
	pptr->reenumerate = 0;
	/* Only mark dead if it's not a root PHY and its dtype isn't NOTHING */
	/* XXX: What about directly attached disks? */
	if (!IS_ROOT_PHY(pptr) && (pptr->dtype != NOTHING))
		pptr->dead = 1;
	pptr->changed = 0;
	/* keep SAS address */
	/* keep path */
	/* keep ref_count */
	/* Don't clear iport on root PHYs - they are handled in pmcs_intr.c */
	if (!IS_ROOT_PHY(pptr)) {
		/* Remember the iport we were last associated with. */
		pptr->last_iport = pptr->iport;
		pptr->iport = NULL;
	}
	/* keep target */
}
3246
/*
 * Allocate softstate for this target if there isn't already one.  If there
 * is, just redo our internal configuration.  If it is actually "new", we'll
 * soon get a tran_tgt_init for it.
 *
 * Configures the PHY (obtaining a device handle) and, if the PHY has lost
 * its target pointer, attempts to recover it either from a sibling root
 * PHY on the same iport (primary PHY of a wide port changed) or from the
 * dead_phys list.  Finally re-assigns the device if the target's SAS
 * address still matches the PHY.
 *
 * Called with PHY locked.
 */
static void
pmcs_new_tport(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "%s: phy 0x%p @ %s",
	    __func__, (void *)pptr, pptr->path);

	if (pmcs_configure_phy(pwp, pptr) == B_FALSE) {
		/*
		 * If the config failed, mark the PHY as changed so discovery
		 * will come back to it.
		 */
		PHY_CHANGED(pwp, pptr);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: pmcs_configure_phy failed for phy 0x%p", __func__,
		    (void *)pptr);
		return;
	}

	/* Mark PHY as no longer changed */
	pptr->changed = 0;

	/*
	 * If the PHY has no target pointer:
	 *
	 * If it's a root PHY, see if another PHY in the iport holds the
	 * target pointer (primary PHY changed).  If so, move it over.
	 *
	 * If it's not a root PHY, see if there's a PHY on the dead_phys
	 * list that matches.
	 */
	if (pptr->target == NULL) {
		if (IS_ROOT_PHY(pptr)) {
			pmcs_phy_t *rphy = pwp->root_phys;

			/*
			 * Scan sibling root PHYs; each is locked only for
			 * the duration of its own check.
			 */
			while (rphy) {
				if (rphy == pptr) {
					rphy = rphy->sibling;
					continue;
				}

				mutex_enter(&rphy->phy_lock);
				if ((rphy->iport == pptr->iport) &&
				    (rphy->target != NULL)) {
					mutex_enter(&rphy->target->statlock);
					pptr->target = rphy->target;
					rphy->target = NULL;
					pptr->target->phy = pptr;
					/* The target is now on pptr */
					mutex_exit(&pptr->target->statlock);
					mutex_exit(&rphy->phy_lock);
					pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
					    pptr, pptr->target,
					    "%s: Moved target from %s to %s",
					    __func__, rphy->path, pptr->path);
					break;
				}
				mutex_exit(&rphy->phy_lock);

				rphy = rphy->sibling;
			}
		} else {
			pmcs_reap_dead_phy(pptr);
		}
	}

	/*
	 * Only assign the device if there is a target for this PHY with a
	 * matching SAS address.  If an iport is disconnected from one piece
	 * of storage and connected to another within the iport stabilization
	 * time, we can get the PHY/target mismatch situation.
	 *
	 * Otherwise, it'll get done in tran_tgt_init.
	 */
	if (pptr->target) {
		mutex_enter(&pptr->target->statlock);
		if (pmcs_phy_target_match(pptr) == B_FALSE) {
			mutex_exit(&pptr->target->statlock);
			/*
			 * Drop the PHY reference associated with the stale
			 * target (root PHY refs are handled elsewhere).
			 */
			if (!IS_ROOT_PHY(pptr)) {
				pmcs_dec_phy_ref_count(pptr);
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
			    "%s: Not assigning existing tgt %p for PHY %p "
			    "(WWN mismatch)", __func__, (void *)pptr->target,
			    (void *)pptr);
			pptr->target = NULL;
			return;
		}

		if (!pmcs_assign_device(pwp, pptr->target)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "%s: pmcs_assign_device failed for target 0x%p",
			    __func__, (void *)pptr->target);
		}
		mutex_exit(&pptr->target->statlock);
	}
}
3349
3350 /*
3351 * Called with PHY lock held.
3352 */
3353 static boolean_t
pmcs_configure_phy(pmcs_hw_t * pwp,pmcs_phy_t * pptr)3354 pmcs_configure_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
3355 {
3356 char *dtype;
3357
3358 ASSERT(mutex_owned(&pptr->phy_lock));
3359
3360 /*
3361 * Mark this device as no longer changed.
3362 */
3363 pptr->changed = 0;
3364
3365 /*
3366 * If we don't have a device handle, get one.
3367 */
3368 if (pmcs_get_device_handle(pwp, pptr)) {
3369 return (B_FALSE);
3370 }
3371
3372 pptr->configured = 1;
3373
3374 switch (pptr->dtype) {
3375 case SAS:
3376 dtype = "SAS";
3377 break;
3378 case SATA:
3379 dtype = "SATA";
3380 break;
3381 case EXPANDER:
3382 dtype = "SMP";
3383 break;
3384 default:
3385 dtype = "???";
3386 }
3387
3388 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "config_dev: %s "
3389 "dev %s " SAS_ADDR_FMT " dev id 0x%x lr 0x%x", dtype, pptr->path,
3390 SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate);
3391
3392 return (B_TRUE);
3393 }
3394
/*
 * Configure an expander PHY: determine how many PHYs it contains, allocate
 * and discover a child PHY for each, install the new child list, and
 * announce the expander as an SMP target port.  If the expander turns out
 * to be a subsidiary PHY of an already-configured wide port, it is simply
 * marked as such and folded into the primary PHY's port bitmaps.
 *
 * Called with PHY locked.
 */
static void
pmcs_configure_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, pmcs_iport_t *iport)
{
	pmcs_phy_t *ctmp, *clist = NULL, *cnext;
	int result, i, nphy = 0;
	boolean_t root_phy = B_FALSE;

	ASSERT(iport);

	/*
	 * Step 1- clear our "changed" bit.  If we need to retry/restart due
	 * to resource shortages, we'll set it again.  While we're doing
	 * configuration, other events may set it again as well.  If the PHY
	 * is a root PHY and is currently marked as having changed, reset the
	 * config_stop timer as well.
	 */
	if (IS_ROOT_PHY(pptr) && pptr->changed) {
		pptr->config_stop = ddi_get_lbolt() +
		    drv_usectohz(PMCS_MAX_CONFIG_TIME);
	}
	pptr->changed = 0;

	/*
	 * Step 2- make sure we don't overflow the expansion tree depth limit.
	 */
	if (pptr->level == PMCS_MAX_XPND-1) {
		pmcs_prt(pwp, PMCS_PRT_WARN, pptr, NULL,
		    "%s: SAS expansion tree too deep", __func__);
		return;
	}

	/*
	 * Step 3- Check if this expander is part of a wide phy that has
	 * already been configured.
	 *
	 * This is known by checking this level for another EXPANDER device
	 * with the same SAS address and isn't already marked as a subsidiary
	 * phy and a parent whose SAS address is the same as our SAS address
	 * (if there are parents).
	 */
	if (!IS_ROOT_PHY(pptr)) {
		/*
		 * No need to lock the parent here because we're in discovery
		 * and the only time a PHY's children pointer can change is
		 * in discovery; either in pmcs_clear_expander (which has
		 * already been called) or here, down below.  Plus, trying to
		 * grab the parent's lock here can cause deadlock.
		 */
		ctmp = pptr->parent->children;
	} else {
		ctmp = pwp->root_phys;
		root_phy = B_TRUE;
	}

	while (ctmp) {
		/*
		 * If we've checked all PHYs up to pptr, we stop.  Otherwise,
		 * we'll be checking for a primary PHY with a higher PHY
		 * number than pptr, which will never happen.  The primary
		 * PHY on non-root expanders will ALWAYS be the lowest
		 * numbered PHY.
		 */
		if (ctmp == pptr) {
			break;
		}

		/*
		 * If pptr and ctmp are root PHYs, just grab the mutex on
		 * ctmp.  No need to lock the entire tree.  If they are not
		 * root PHYs, there is no need to lock since a non-root PHY's
		 * SAS address and other characteristics can only change in
		 * discovery anyway.
		 */
		if (root_phy) {
			mutex_enter(&ctmp->phy_lock);
		}

		if (ctmp->dtype == EXPANDER && ctmp->width &&
		    memcmp(ctmp->sas_address, pptr->sas_address, 8) == 0) {
			int widephy = 0;
			/*
			 * If these phys are not root PHYs, compare their SAS
			 * addresses too.
			 */
			if (!root_phy) {
				if (memcmp(ctmp->parent->sas_address,
				    pptr->parent->sas_address, 8) == 0) {
					widephy = 1;
				}
			} else {
				widephy = 1;
			}
			if (widephy) {
				ctmp->width++;
				pptr->subsidiary = 1;

				/*
				 * Update the primary PHY's attached-port-pm
				 * and target-port-pm information with the info
				 * from this subsidiary
				 */
				pmcs_update_phy_pm_props(ctmp,
				    pptr->att_port_pm_tmp,
				    pptr->tgt_port_pm_tmp, B_TRUE);

				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: PHY %s part of wide PHY %s "
				    "(now %d wide)", __func__, pptr->path,
				    ctmp->path, ctmp->width);
				if (root_phy) {
					mutex_exit(&ctmp->phy_lock);
				}
				return;
			}
		}

		cnext = ctmp->sibling;
		if (root_phy) {
			mutex_exit(&ctmp->phy_lock);
		}
		ctmp = cnext;
	}

	/*
	 * Step 4- If we don't have a device handle, get one.  Since this
	 * is the primary PHY, make sure subsidiary is cleared.
	 */
	pptr->subsidiary = 0;
	pptr->iport = iport;
	if (pmcs_get_device_handle(pwp, pptr)) {
		goto out;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL, "Config expander %s "
	    SAS_ADDR_FMT " dev id 0x%x lr 0x%x", pptr->path,
	    SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate);

	/*
	 * Step 5- figure out how many phys are in this expander.  A return
	 * of 0 means a retryable (resource) failure; retry until the
	 * config_stop deadline passes, then kill the subtree.
	 */
	nphy = pmcs_expander_get_nphy(pwp, pptr);
	if (nphy <= 0) {
		if (nphy == 0 && ddi_get_lbolt() < pptr->config_stop) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Retries exhausted for %s, killing", __func__,
			    pptr->path);
			pptr->config_stop = 0;
			pmcs_kill_changed(pwp, pptr, 0);
		}
		goto out;
	}

	/*
	 * Step 6- Allocate a list of phys for this expander and figure out
	 * what each one is.
	 */
	for (i = 0; i < nphy; i++) {
		ctmp = kmem_cache_alloc(pwp->phy_cache, KM_SLEEP);
		bzero(ctmp, sizeof (pmcs_phy_t));
		ctmp->device_id = PMCS_INVALID_DEVICE_ID;
		ctmp->sibling = clist;
		ctmp->pend_dtype = NEW; /* Init pending dtype */
		ctmp->config_stop = ddi_get_lbolt() +
		    drv_usectohz(PMCS_MAX_CONFIG_TIME);
		clist = ctmp;
	}

	mutex_enter(&pwp->config_lock);
	if (pwp->config_changed) {
		RESTART_DISCOVERY_LOCKED(pwp);
		mutex_exit(&pwp->config_lock);
		/*
		 * Clean up the newly allocated PHYs and return.  These PHYs
		 * are not yet locked (that happens in Step 7), so they can
		 * be freed directly.
		 */
		while (clist) {
			ctmp = clist->sibling;
			clist->target_addr = NULL;
			kmem_cache_free(pwp->phy_cache, clist);
			clist = ctmp;
		}
		return;
	}
	mutex_exit(&pwp->config_lock);

	/*
	 * Step 7- Now fill in the rest of the static portions of the phy.
	 * Each child PHY is left locked for Step 8.
	 */
	for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
		ctmp->parent = pptr;
		ctmp->pwp = pwp;
		ctmp->level = pptr->level+1;
		ctmp->portid = pptr->portid;
		if (ctmp->tolerates_sas2) {
			ASSERT(i < SAS2_PHYNUM_MAX);
			ctmp->phynum = i & SAS2_PHYNUM_MASK;
		} else {
			ASSERT(i < SAS_PHYNUM_MAX);
			ctmp->phynum = i & SAS_PHYNUM_MASK;
		}
		pmcs_phy_name(pwp, ctmp, ctmp->path, sizeof (ctmp->path));
		pmcs_lock_phy(ctmp);
	}

	/*
	 * Step 8- Discover things about each phy in the expander.
	 */
	for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
		result = pmcs_expander_content_discover(pwp, pptr, ctmp);
		if (result <= 0) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pptr->config_stop = 0;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pmcs_kill_changed(pwp, pptr, 0);
			}
			goto out;
		}

		/* Set pend_dtype to dtype for 1st time initialization */
		ctmp->pend_dtype = ctmp->dtype;
	}

	/*
	 * Step 9: Install the new list on the next level.  There should
	 * typically be no children pointer on this PHY.  There is one known
	 * case where this can happen, though.  If a root PHY goes down and
	 * comes back up before discovery can run, we will fail to remove the
	 * children from that PHY since it will no longer be marked dead.
	 * However, in this case, all children should also be marked dead.  If
	 * we see that, take those children and put them on the dead_phys list.
	 */
	if (pptr->children != NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: Expander @ %s still has children: Clean up",
		    __func__, pptr->path);
		pmcs_add_dead_phys(pwp, pptr->children);
	}

	/*
	 * Set the new children pointer for this expander.  Ownership of the
	 * list transfers to pptr, so clist is cleared to keep the out: path
	 * from freeing it.
	 */
	pptr->children = clist;
	clist = NULL;
	pptr->ncphy = nphy;
	pptr->configured = 1;

	/*
	 * We only set width if we're greater than level 0.
	 */
	if (pptr->level) {
		pptr->width = 1;
	}

	/*
	 * Now tell the rest of the world about us, as an SMP node.
	 */
	pptr->iport = iport;
	pmcs_new_tport(pwp, pptr);

out:
	/*
	 * Failure path: unlock and free any child PHYs still on clist
	 * (clist is NULL here on success or on failures before Step 6).
	 */
	while (clist) {
		ctmp = clist->sibling;
		pmcs_unlock_phy(clist);
		clist->target_addr = NULL;
		kmem_cache_free(pwp->phy_cache, clist);
		clist = ctmp;
	}
}
3672
/*
 * 2. Check expanders marked changed (but not dead) to see if they still have
 * the same number of phys and the same SAS address.  Mark them, their
 * subsidiary phys (if wide) and their descendents dead if anything has
 * changed.  Check the devices they contain to see if *they* have changed.
 * If they've changed from type NOTHING we leave them marked changed to be
 * configured later (picking up a new SAS address and link rate if possible).
 * Otherwise, any change in type, SAS address or removal of target role will
 * cause us to mark them (and their descendents) as dead and cause any
 * pending commands and associated devices to be removed.
 *
 * Called with PHY (pptr) locked.
 */

static void
pmcs_check_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
{
	int nphy, result;
	pmcs_phy_t *ctmp, *local, *local_list = NULL, *local_tail = NULL;
	boolean_t kill_changed, changed;

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: check %s", __func__, pptr->path);

	/*
	 * Step 1: Mark phy as not changed.  We will mark it changed if we
	 * need to retry.
	 */
	pptr->changed = 0;

	/*
	 * Reset the config_stop time.  Although we're not actually configuring
	 * anything here, we do want some indication of when to give up trying
	 * if we can't communicate with the expander.
	 */
	pptr->config_stop = ddi_get_lbolt() +
	    drv_usectohz(PMCS_MAX_CONFIG_TIME);

	/*
	 * Step 2: Figure out how many phys are in this expander.  If
	 * pmcs_expander_get_nphy returns 0 we ran out of resources,
	 * so reschedule and try later.  If it returns another error,
	 * just return.
	 */
	nphy = pmcs_expander_get_nphy(pwp, pptr);
	if (nphy <= 0) {
		if ((nphy == 0) && (ddi_get_lbolt() < pptr->config_stop)) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			pptr->config_stop = 0;
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
			    "%s: Retries exhausted for %s, killing", __func__,
			    pptr->path);
			pmcs_kill_changed(pwp, pptr, 0);
		}
		return;
	}

	/*
	 * Step 3: If the number of phys don't agree, kill the old sub-tree.
	 */
	if (nphy != pptr->ncphy) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: number of contained phys for %s changed from %d to %d",
		    __func__, pptr->path, pptr->ncphy, nphy);
		/*
		 * Force a rescan of this expander after dead contents
		 * are cleared and removed.
		 */
		pmcs_kill_changed(pwp, pptr, 0);
		return;
	}

	/*
	 * Step 4: if we're at the bottom of the stack, we're done
	 * (we can't have any levels below us)
	 */
	if (pptr->level == PMCS_MAX_XPND-1) {
		return;
	}

	/*
	 * Step 5: Discover things about each phy in this expander.  We do
	 * this by walking the current list of contained phys and doing a
	 * content discovery for it to a local phy.
	 */
	ctmp = pptr->children;
	ASSERT(ctmp);
	if (ctmp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
		    "%s: No children attached to expander @ %s?", __func__,
		    pptr->path);
		return;
	}

	while (ctmp) {
		/*
		 * Allocate a local PHY to contain the proposed new contents
		 * and link it to the rest of the local PHYs so that they
		 * can all be freed later.
		 */
		local = pmcs_clone_phy(ctmp);

		if (local_list == NULL) {
			local_list = local;
			local_tail = local;
		} else {
			local_tail->sibling = local;
			local_tail = local;
		}

		/*
		 * Need to lock the local PHY since pmcs_expander_content_
		 * discovery may call pmcs_clear_phy on it, which expects
		 * the PHY to be locked.
		 */
		pmcs_lock_phy(local);
		result = pmcs_expander_content_discover(pwp, pptr, local);
		pmcs_unlock_phy(local);
		if (result <= 0) {
			if (ddi_get_lbolt() < pptr->config_stop) {
				PHY_CHANGED(pwp, pptr);
				RESTART_DISCOVERY(pwp);
			} else {
				pptr->config_stop = 0;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
				    "%s: Retries exhausted for %s, killing",
				    __func__, pptr->path);
				pmcs_kill_changed(pwp, pptr, 0);
			}

			/*
			 * Release all the local PHYs that we allocated.
			 */
			pmcs_free_phys(pwp, local_list);
			return;
		}

		ctmp = ctmp->sibling;
	}

	/*
	 * Step 6: Compare the local PHY's contents to our current PHY.  If
	 * there are changes, take the appropriate action.
	 * This is done in two steps (step 5 above, and 6 here) so that if we
	 * have to bail during this process (e.g. pmcs_expander_content_discover
	 * fails), we haven't actually changed the state of any of the real
	 * PHYs.  Next time we come through here, we'll be starting over from
	 * scratch.  This keeps us from marking a changed PHY as no longer
	 * changed, but then having to bail only to come back next time and
	 * think that the PHY hadn't changed.  If this were to happen, we
	 * would fail to properly configure the device behind this PHY.
	 */
	local = local_list;
	ctmp = pptr->children;

	while (ctmp) {
		changed = B_FALSE;
		kill_changed = B_FALSE;

		/*
		 * We set local to local_list prior to this loop so that we
		 * can simply walk the local_list while we walk this list.  The
		 * two lists should be completely in sync.
		 *
		 * Clear the changed flag here.
		 */
		ctmp->changed = 0;

		if (ctmp->dtype != local->dtype) {
			if (ctmp->dtype != NOTHING) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
				    "%s: %s type changed from %s to %s "
				    "(killing)", __func__, ctmp->path,
				    PHY_TYPE(ctmp), PHY_TYPE(local));
				/*
				 * Force a rescan of this expander after dead
				 * contents are cleared and removed.
				 */
				changed = B_TRUE;
				kill_changed = B_TRUE;
			} else {
				changed = B_TRUE;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
				    "%s: %s type changed from NOTHING to %s",
				    __func__, ctmp->path, PHY_TYPE(local));
				/*
				 * Since this PHY was nothing and is now
				 * something, reset the config_stop timer.
				 */
				ctmp->config_stop = ddi_get_lbolt() +
				    drv_usectohz(PMCS_MAX_CONFIG_TIME);
			}

		} else if (ctmp->atdt != local->atdt) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL, "%s: "
			    "%s attached device type changed from %d to %d "
			    "(killing)", __func__, ctmp->path, ctmp->atdt,
			    local->atdt);
			/*
			 * Force a rescan of this expander after dead
			 * contents are cleared and removed.
			 */
			changed = B_TRUE;

			if (local->atdt == 0) {
				kill_changed = B_TRUE;
			}
		} else if (ctmp->link_rate != local->link_rate) {
			pmcs_prt(pwp, PMCS_PRT_INFO, ctmp, NULL, "%s: %s "
			    "changed speed from %s to %s", __func__, ctmp->path,
			    pmcs_get_rate(ctmp->link_rate),
			    pmcs_get_rate(local->link_rate));
			/* If the speed changed from invalid, force rescan */
			if (!PMCS_VALID_LINK_RATE(ctmp->link_rate)) {
				changed = B_TRUE;
				RESTART_DISCOVERY(pwp);
			} else {
				/* Just update to the new link rate */
				ctmp->link_rate = local->link_rate;
			}

			if (!PMCS_VALID_LINK_RATE(local->link_rate)) {
				kill_changed = B_TRUE;
			}
		} else if (memcmp(ctmp->sas_address, local->sas_address,
		    sizeof (ctmp->sas_address)) != 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
			    "%s: SAS Addr for %s changed from " SAS_ADDR_FMT
			    "to " SAS_ADDR_FMT " (kill old tree)", __func__,
			    ctmp->path, SAS_ADDR_PRT(ctmp->sas_address),
			    SAS_ADDR_PRT(local->sas_address));
			/*
			 * Force a rescan of this expander after dead
			 * contents are cleared and removed.
			 */
			changed = B_TRUE;
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
			    "%s: %s looks the same (type %s)",
			    __func__, ctmp->path, PHY_TYPE(ctmp));
			/*
			 * If EXPANDER, still mark it changed so we
			 * re-evaluate its contents.  If it's not an expander,
			 * but it hasn't been configured, also mark it as
			 * changed so that it will undergo configuration.
			 */
			if (ctmp->dtype == EXPANDER) {
				changed = B_TRUE;
			} else if ((ctmp->dtype != NOTHING) &&
			    !ctmp->configured) {
				ctmp->changed = 1;
			} else {
				/* It simply hasn't changed */
				ctmp->changed = 0;
			}
		}

		/*
		 * If the PHY changed, call pmcs_kill_changed if indicated,
		 * update its contents to reflect its current state and mark it
		 * as changed.
		 */
		if (changed) {
			/*
			 * pmcs_kill_changed will mark the PHY as changed, so
			 * only do PHY_CHANGED if we did not do kill_changed.
			 */
			if (kill_changed) {
				pmcs_kill_changed(pwp, ctmp, 0);
			} else {
				/*
				 * If we're not killing the device, it's not
				 * dead.  Mark the PHY as changed.
				 */
				PHY_CHANGED(pwp, ctmp);

				if (ctmp->dead) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
					    ctmp, NULL, "%s: Unmarking PHY %s "
					    "dead, restarting discovery",
					    __func__, ctmp->path);
					ctmp->dead = 0;
					RESTART_DISCOVERY(pwp);
				}
			}

			/*
			 * If the dtype of this PHY is now NOTHING, mark it as
			 * unconfigured.  Set pend_dtype to what the new dtype
			 * is.  It'll get updated at the end of the discovery
			 * process.
			 */
			if (local->dtype == NOTHING) {
				bzero(ctmp->sas_address,
				    sizeof (local->sas_address));
				ctmp->atdt = 0;
				ctmp->link_rate = 0;
				ctmp->pend_dtype = NOTHING;
				ctmp->configured = 0;
			} else {
				(void) memcpy(ctmp->sas_address,
				    local->sas_address,
				    sizeof (local->sas_address));
				ctmp->atdt = local->atdt;
				ctmp->link_rate = local->link_rate;
				ctmp->pend_dtype = local->dtype;
				ctmp->att_port_pm_tmp = local->att_port_pm_tmp;
				ctmp->tgt_port_pm_tmp = local->tgt_port_pm_tmp;
			}
		}

		local = local->sibling;
		ctmp = ctmp->sibling;
	}

	/*
	 * If we got to here, that means we were able to see all the PHYs
	 * and we can now update all of the real PHYs with the information
	 * we got on the local PHYs.  Once that's done, free all the local
	 * PHYs.
	 */

	pmcs_free_phys(pwp, local_list);
}
3999
4000 /*
4001 * Top level routine to check expanders. We call pmcs_check_expander for
4002 * each expander. Since we're not doing any configuration right now, it
4003 * doesn't matter if this is breadth-first.
4004 */
4005 static void
pmcs_check_expanders(pmcs_hw_t * pwp,pmcs_phy_t * pptr)4006 pmcs_check_expanders(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
4007 {
4008 pmcs_phy_t *phyp, *pnext, *pchild;
4009
4010 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4011 "%s: %s", __func__, pptr->path);
4012
4013 /*
4014 * Check each expander at this level
4015 */
4016 phyp = pptr;
4017 while (phyp) {
4018 pmcs_lock_phy(phyp);
4019
4020 if ((phyp->dtype == EXPANDER) && phyp->changed &&
4021 !phyp->dead && !phyp->subsidiary &&
4022 phyp->configured) {
4023 pmcs_check_expander(pwp, phyp);
4024 }
4025
4026 pnext = phyp->sibling;
4027 pmcs_unlock_phy(phyp);
4028 phyp = pnext;
4029 }
4030
4031 /*
4032 * Now check the children
4033 */
4034 phyp = pptr;
4035 while (phyp) {
4036 pmcs_lock_phy(phyp);
4037 pnext = phyp->sibling;
4038 pchild = phyp->children;
4039 pmcs_unlock_phy(phyp);
4040
4041 if (pchild) {
4042 pmcs_check_expanders(pwp, pchild);
4043 }
4044
4045 phyp = pnext;
4046 }
4047 }
4048
/*
 * Recursively clear a dead expander and its subtree: mark children of a
 * dead expander dead, recurse into child expanders, move the dead children
 * onto the dead_phys list, clear any subsidiary PHYs of this expander's
 * wide port at the same level, and finally clear the expander PHY itself.
 * If the expander is not dead, only the recursive walk is performed.
 *
 * Called with softstate and PHY locked.  "level" is the recursion depth;
 * locking of parent/sibling PHYs is only needed at level 0 because deeper
 * levels are already locked by the caller's traversal.
 */
static void
pmcs_clear_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, int level)
{
	pmcs_phy_t *ctmp;

	ASSERT(mutex_owned(&pwp->lock));
	ASSERT(mutex_owned(&pptr->phy_lock));
	ASSERT(pptr->level < PMCS_MAX_XPND - 1);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
	    "%s: checking %s", __func__, pptr->path);

	ctmp = pptr->children;
	while (ctmp) {
		/*
		 * If the expander is dead, mark its children dead
		 */
		if (pptr->dead) {
			ctmp->dead = 1;
		}
		if (ctmp->dtype == EXPANDER) {
			pmcs_clear_expander(pwp, ctmp, level + 1);
		}
		ctmp = ctmp->sibling;
	}

	/*
	 * If this expander is not dead, we're done here.
	 */
	if (!pptr->dead) {
		return;
	}

	/*
	 * Now snip out the list of children below us and release them
	 */
	if (pptr->children) {
		pmcs_add_dead_phys(pwp, pptr->children);
	}

	pptr->children = NULL;

	/*
	 * Clear subsidiary phys as well.  Getting the parent's PHY lock
	 * is only necessary if level == 0 since otherwise the parent is
	 * already locked.
	 */
	if (!IS_ROOT_PHY(pptr)) {
		if (level == 0) {
			mutex_enter(&pptr->parent->phy_lock);
		}
		ctmp = pptr->parent->children;
		if (level == 0) {
			mutex_exit(&pptr->parent->phy_lock);
		}
	} else {
		ctmp = pwp->root_phys;
	}

	while (ctmp) {
		if (ctmp == pptr) {
			ctmp = ctmp->sibling;
			continue;
		}
		/*
		 * We only need to lock subsidiary PHYs on the level 0
		 * expander.  Any children of that expander, subsidiaries or
		 * not, will already be locked.
		 */
		if (level == 0) {
			pmcs_lock_phy(ctmp);
		}
		/* Skip PHYs that are not subsidiaries of our wide port. */
		if (ctmp->dtype != EXPANDER || ctmp->subsidiary == 0 ||
		    memcmp(ctmp->sas_address, pptr->sas_address,
		    sizeof (ctmp->sas_address)) != 0) {
			if (level == 0) {
				pmcs_unlock_phy(ctmp);
			}
			ctmp = ctmp->sibling;
			continue;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
		    "%s: subsidiary %s", __func__, ctmp->path);
		pmcs_clear_phy(pwp, ctmp);
		if (level == 0) {
			pmcs_unlock_phy(ctmp);
		}
		ctmp = ctmp->sibling;
	}

	pmcs_clear_phy(pwp, pptr);
}
4144
4145 /*
4146 * Called with PHY locked and with scratch acquired. We return 0 if
4147 * we fail to allocate resources or notice that the configuration
4148 * count changed while we were running the command. We return
4149 * less than zero if we had an I/O error or received an unsupported
4150 * configuration. Otherwise we return the number of phys in the
4151 * expander.
4152 */
4153 #define DFM(m, y) if (m == NULL) m = y
4154 static int
pmcs_expander_get_nphy(pmcs_hw_t * pwp,pmcs_phy_t * pptr)4155 pmcs_expander_get_nphy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
4156 {
4157 struct pmcwork *pwrk;
4158 pmcs_iport_t *iport;
4159 char buf[64];
4160 const uint_t rdoff = 0x100; /* returned data offset */
4161 smp_response_frame_t *srf;
4162 smp_report_general_resp_t *srgr;
4163 uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status, ival;
4164 int result = 0;
4165
4166 ival = 0x40001100;
4167
4168 again:
4169 if (!pptr->iport || !pptr->valid_device_id) {
4170 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
4171 "%s: Can't reach PHY %s", __func__, pptr->path);
4172 goto out;
4173 }
4174
4175 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
4176 if (pwrk == NULL) {
4177 goto out;
4178 }
4179 (void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
4180 pwrk->arg = pwp->scratch;
4181 pwrk->dtype = pptr->dtype;
4182 pwrk->xp = pptr->target;
4183 pwrk->htag |= PMCS_TAG_NONIO_CMD;
4184 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
4185 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
4186 if (ptr == NULL) {
4187 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
4188 pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, NULL,
4189 "%s: GET_IQ_ENTRY failed", __func__);
4190 pmcs_pwork(pwp, pwrk);
4191 goto out;
4192 }
4193
4194 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
4195 msg[1] = LE_32(pwrk->htag);
4196 msg[2] = LE_32(pptr->device_id);
4197 msg[3] = LE_32((4 << SMP_REQUEST_LENGTH_SHIFT) | SMP_INDIRECT_RESPONSE);
4198 /*
4199 * Send SMP REPORT GENERAL (of either SAS1.1 or SAS2 flavors).
4200 */
4201 msg[4] = BE_32(ival);
4202 msg[5] = 0;
4203 msg[6] = 0;
4204 msg[7] = 0;
4205 msg[8] = 0;
4206 msg[9] = 0;
4207 msg[10] = 0;
4208 msg[11] = 0;
4209 msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
4210 msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
4211 msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff);
4212 msg[15] = 0;
4213
4214 COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);
4215
4216 pmcs_hold_iport(pptr->iport);
4217 iport = pptr->iport;
4218 pmcs_smp_acquire(iport);
4219 pwrk->state = PMCS_WORK_STATE_ONCHIP;
4220 htag = pwrk->htag;
4221 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
4222 pmcs_unlock_phy(pptr);
4223 WAIT_FOR(pwrk, 1000, result);
4224 pmcs_pwork(pwp, pwrk);
4225 pmcs_smp_release(iport);
4226 pmcs_rele_iport(iport);
4227 pmcs_lock_phy(pptr);
4228 if (result) {
4229 pmcs_timed_out(pwp, htag, __func__);
4230 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4231 "%s: Issuing SMP ABORT for htag 0x%08x", __func__, htag);
4232 if (pmcs_abort(pwp, pptr, htag, 0, 1)) {
4233 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4234 "%s: SMP ABORT failed for cmd (htag 0x%08x)",
4235 __func__, htag);
4236 }
4237 result = 0;
4238 goto out;
4239 }
4240
4241 mutex_enter(&pwp->config_lock);
4242 if (pwp->config_changed) {
4243 RESTART_DISCOVERY_LOCKED(pwp);
4244 mutex_exit(&pwp->config_lock);
4245 result = 0;
4246 goto out;
4247 }
4248 mutex_exit(&pwp->config_lock);
4249
4250 ptr = (void *)pwp->scratch;
4251 status = LE_32(ptr[2]);
4252 if (status == PMCOUT_STATUS_UNDERFLOW ||
4253 status == PMCOUT_STATUS_OVERFLOW) {
4254 pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, pptr, NULL,
4255 "%s: over/underflow", __func__);
4256 status = PMCOUT_STATUS_OK;
4257 }
4258 srf = (smp_response_frame_t *)&((uint32_t *)pwp->scratch)[rdoff >> 2];
4259 srgr = (smp_report_general_resp_t *)
4260 &((uint32_t *)pwp->scratch)[(rdoff >> 2)+1];
4261
4262 if (status != PMCOUT_STATUS_OK) {
4263 char *nag = NULL;
4264 (void) snprintf(buf, sizeof (buf),
4265 "%s: SMP op failed (0x%x)", __func__, status);
4266 switch (status) {
4267 case PMCOUT_STATUS_IO_PORT_IN_RESET:
4268 DFM(nag, "I/O Port In Reset");
4269 /* FALLTHROUGH */
4270 case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
4271 DFM(nag, "Hardware Timeout");
4272 /* FALLTHROUGH */
4273 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
4274 DFM(nag, "Internal SMP Resource Failure");
4275 /* FALLTHROUGH */
4276 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
4277 DFM(nag, "PHY Not Ready");
4278 /* FALLTHROUGH */
4279 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
4280 DFM(nag, "Connection Rate Not Supported");
4281 /* FALLTHROUGH */
4282 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
4283 DFM(nag, "Open Retry Timeout");
4284 /* FALLTHROUGH */
4285 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
4286 DFM(nag, "HW Resource Busy");
4287 /* FALLTHROUGH */
4288 case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
4289 DFM(nag, "Response Connection Error");
4290 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
4291 "%s: expander %s SMP operation failed (%s)",
4292 __func__, pptr->path, nag);
4293 break;
4294
4295 /*
4296 * For the IO_DS_NON_OPERATIONAL case, we need to kick off
4297 * device state recovery and return 0 so that the caller
4298 * doesn't assume this expander is dead for good.
4299 */
4300 case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: {
4301 pmcs_xscsi_t *xp = pptr->target;
4302
4303 pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, pptr, xp,
4304 "%s: expander %s device state non-operational",
4305 __func__, pptr->path);
4306
4307 if (xp == NULL) {
4308 /*
4309 * Kick off recovery right now.
4310 */
4311 SCHEDULE_WORK(pwp, PMCS_WORK_DS_ERR_RECOVERY);
4312 (void) ddi_taskq_dispatch(pwp->tq, pmcs_worker,
4313 pwp, DDI_NOSLEEP);
4314 } else {
4315 mutex_enter(&xp->statlock);
4316 pmcs_start_dev_state_recovery(xp, pptr);
4317 mutex_exit(&xp->statlock);
4318 }
4319
4320 break;
4321 }
4322
4323 default:
4324 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr);
4325 result = -EIO;
4326 break;
4327 }
4328 } else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) {
4329 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
4330 "%s: bad response frame type 0x%x",
4331 __func__, srf->srf_frame_type);
4332 result = -EINVAL;
4333 } else if (srf->srf_function != SMP_FUNC_REPORT_GENERAL) {
4334 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
4335 "%s: bad response function 0x%x",
4336 __func__, srf->srf_function);
4337 result = -EINVAL;
4338 } else if (srf->srf_result != 0) {
4339 /*
4340 * Check to see if we have a value of 3 for failure and
4341 * whether we were using a SAS2.0 allocation length value
4342 * and retry without it.
4343 */
4344 if (srf->srf_result == 3 && (ival & 0xff00)) {
4345 ival &= ~0xff00;
4346 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
4347 "%s: err 0x%x with SAS2 request- retry with SAS1",
4348 __func__, srf->srf_result);
4349 goto again;
4350 }
4351 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
4352 "%s: bad response 0x%x", __func__, srf->srf_result);
4353 result = -EINVAL;
4354 } else if (srgr->srgr_configuring) {
4355 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
4356 "%s: expander at phy %s is still configuring",
4357 __func__, pptr->path);
4358 result = 0;
4359 } else {
4360 result = srgr->srgr_number_of_phys;
4361 if (ival & 0xff00) {
4362 pptr->tolerates_sas2 = 1;
4363 }
4364 /*
4365 * Save off the REPORT_GENERAL response
4366 */
4367 bcopy(srgr, &pptr->rg_resp, sizeof (smp_report_general_resp_t));
4368 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4369 "%s has %d phys and %s SAS2", pptr->path, result,
4370 pptr->tolerates_sas2? "tolerates" : "does not tolerate");
4371 }
4372 out:
4373 return (result);
4374 }
4375
4376 /*
4377 * Called with expander locked (and thus, pptr) as well as all PHYs up to
4378 * the root, and scratch acquired. Return 0 if we fail to allocate resources
4379 * or notice that the configuration changed while we were running the command.
4380 *
4381 * We return less than zero if we had an I/O error or received an
4382 * unsupported configuration.
4383 */
4384 static int
pmcs_expander_content_discover(pmcs_hw_t * pwp,pmcs_phy_t * expander,pmcs_phy_t * pptr)4385 pmcs_expander_content_discover(pmcs_hw_t *pwp, pmcs_phy_t *expander,
4386 pmcs_phy_t *pptr)
4387 {
4388 struct pmcwork *pwrk;
4389 pmcs_iport_t *iport;
4390 char buf[64];
4391 uint8_t sas_address[8];
4392 uint8_t att_sas_address[8];
4393 smp_response_frame_t *srf;
4394 smp_discover_resp_t *sdr;
4395 const uint_t rdoff = 0x100; /* returned data offset */
4396 uint8_t *roff;
4397 uint32_t status, *ptr, msg[PMCS_MSG_SIZE], htag;
4398 int result = 0;
4399 uint8_t ini_support;
4400 uint8_t tgt_support;
4401
4402 if (!expander->iport || !expander->valid_device_id) {
4403 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, expander, expander->target,
4404 "%s: Can't reach PHY %s", __func__, expander->path);
4405 goto out;
4406 }
4407
4408 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, expander);
4409 if (pwrk == NULL) {
4410 goto out;
4411 }
4412 (void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
4413 pwrk->arg = pwp->scratch;
4414 pwrk->dtype = expander->dtype;
4415 pwrk->xp = expander->target;
4416 pwrk->htag |= PMCS_TAG_NONIO_CMD;
4417 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
4418 msg[1] = LE_32(pwrk->htag);
4419 msg[2] = LE_32(expander->device_id);
4420 msg[3] = LE_32((12 << SMP_REQUEST_LENGTH_SHIFT) |
4421 SMP_INDIRECT_RESPONSE);
4422 /*
4423 * Send SMP DISCOVER (of either SAS1.1 or SAS2 flavors).
4424 */
4425 if (expander->tolerates_sas2) {
4426 msg[4] = BE_32(0x40101B00);
4427 } else {
4428 msg[4] = BE_32(0x40100000);
4429 }
4430 msg[5] = 0;
4431 msg[6] = BE_32((pptr->phynum << 16));
4432 msg[7] = 0;
4433 msg[8] = 0;
4434 msg[9] = 0;
4435 msg[10] = 0;
4436 msg[11] = 0;
4437 msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
4438 msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
4439 msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff);
4440 msg[15] = 0;
4441 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
4442 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
4443 if (ptr == NULL) {
4444 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
4445 goto out;
4446 }
4447
4448 COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);
4449
4450 pmcs_hold_iport(expander->iport);
4451 iport = expander->iport;
4452 pmcs_smp_acquire(iport);
4453 pwrk->state = PMCS_WORK_STATE_ONCHIP;
4454 htag = pwrk->htag;
4455 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
4456 pmcs_unlock_phy(expander);
4457 WAIT_FOR(pwrk, 1000, result);
4458 pmcs_pwork(pwp, pwrk);
4459 pmcs_smp_release(iport);
4460 pmcs_rele_iport(iport);
4461 pmcs_lock_phy(expander);
4462 if (result) {
4463 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4464 "%s: Issuing SMP ABORT for htag 0x%08x", __func__, htag);
4465 if (pmcs_abort(pwp, pptr, htag, 0, 1)) {
4466 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4467 "%s: SMP ABORT failed for cmd (htag 0x%08x)",
4468 __func__, htag);
4469 }
4470 result = -ETIMEDOUT;
4471 goto out;
4472 }
4473
4474 mutex_enter(&pwp->config_lock);
4475 if (pwp->config_changed) {
4476 RESTART_DISCOVERY_LOCKED(pwp);
4477 mutex_exit(&pwp->config_lock);
4478 result = 0;
4479 goto out;
4480 }
4481
4482 mutex_exit(&pwp->config_lock);
4483 ptr = (void *)pwp->scratch;
4484 /*
4485 * Point roff to the DMA offset for returned data
4486 */
4487 roff = pwp->scratch;
4488 roff += rdoff;
4489 srf = (smp_response_frame_t *)roff;
4490 sdr = (smp_discover_resp_t *)(roff+4);
4491 status = LE_32(ptr[2]);
4492 if (status == PMCOUT_STATUS_UNDERFLOW ||
4493 status == PMCOUT_STATUS_OVERFLOW) {
4494 pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, pptr, NULL,
4495 "%s: over/underflow", __func__);
4496 status = PMCOUT_STATUS_OK;
4497 }
4498 if (status != PMCOUT_STATUS_OK) {
4499 char *nag = NULL;
4500 (void) snprintf(buf, sizeof (buf),
4501 "%s: SMP op failed (0x%x)", __func__, status);
4502 switch (status) {
4503 case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
4504 DFM(nag, "Hardware Timeout");
4505 /* FALLTHROUGH */
4506 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
4507 DFM(nag, "Internal SMP Resource Failure");
4508 /* FALLTHROUGH */
4509 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
4510 DFM(nag, "PHY Not Ready");
4511 /* FALLTHROUGH */
4512 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
4513 DFM(nag, "Connection Rate Not Supported");
4514 /* FALLTHROUGH */
4515 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
4516 DFM(nag, "Open Retry Timeout");
4517 /* FALLTHROUGH */
4518 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
4519 DFM(nag, "HW Resource Busy");
4520 /* FALLTHROUGH */
4521 case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
4522 DFM(nag, "Response Connection Error");
4523 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
4524 "%s: expander %s SMP operation failed (%s)",
4525 __func__, pptr->path, nag);
4526 break;
4527 default:
4528 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr);
4529 result = -EIO;
4530 break;
4531 }
4532 goto out;
4533 } else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) {
4534 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
4535 "%s: bad response frame type 0x%x",
4536 __func__, srf->srf_frame_type);
4537 result = -EINVAL;
4538 goto out;
4539 } else if (srf->srf_function != SMP_FUNC_DISCOVER) {
4540 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
4541 "%s: bad response function 0x%x",
4542 __func__, srf->srf_function);
4543 result = -EINVAL;
4544 goto out;
4545 } else if (srf->srf_result != SMP_RES_FUNCTION_ACCEPTED) {
4546 result = pmcs_smp_function_result(pwp, srf);
4547 /* Need not fail if PHY is Vacant */
4548 if (result != SMP_RES_PHY_VACANT) {
4549 result = -EINVAL;
4550 goto out;
4551 }
4552 }
4553
4554 /*
4555 * Save off the DISCOVER response
4556 */
4557 bcopy(sdr, &pptr->disc_resp, sizeof (smp_discover_resp_t));
4558
4559 ini_support = (sdr->sdr_attached_sata_host |
4560 (sdr->sdr_attached_smp_initiator << 1) |
4561 (sdr->sdr_attached_stp_initiator << 2) |
4562 (sdr->sdr_attached_ssp_initiator << 3));
4563
4564 tgt_support = (sdr->sdr_attached_sata_device |
4565 (sdr->sdr_attached_smp_target << 1) |
4566 (sdr->sdr_attached_stp_target << 2) |
4567 (sdr->sdr_attached_ssp_target << 3));
4568
4569 pmcs_wwn2barray(BE_64(sdr->sdr_sas_addr), sas_address);
4570 pmcs_wwn2barray(BE_64(sdr->sdr_attached_sas_addr), att_sas_address);
4571
4572 pptr->virtual = sdr->sdr_virtual_phy;
4573
4574 /*
4575 * Set the routing attribute regardless of the PHY type.
4576 */
4577 pptr->routing_attr = sdr->sdr_routing_attr;
4578
4579 switch (sdr->sdr_attached_device_type) {
4580 case SAS_IF_DTYPE_ENDPOINT:
4581 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4582 "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS="
4583 SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x",
4584 pptr->path,
4585 sdr->sdr_attached_device_type,
4586 sdr->sdr_negotiated_logical_link_rate,
4587 ini_support,
4588 tgt_support,
4589 SAS_ADDR_PRT(sas_address),
4590 SAS_ADDR_PRT(att_sas_address),
4591 sdr->sdr_attached_phy_identifier);
4592
4593 if (sdr->sdr_attached_sata_device ||
4594 sdr->sdr_attached_stp_target) {
4595 pptr->dtype = SATA;
4596 } else if (sdr->sdr_attached_ssp_target) {
4597 pptr->dtype = SAS;
4598 } else if (tgt_support || ini_support) {
4599 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4600 "%s: %s has tgt support=%x init support=(%x)",
4601 __func__, pptr->path, tgt_support, ini_support);
4602 }
4603
4604 switch (pptr->routing_attr) {
4605 case SMP_ROUTING_SUBTRACTIVE:
4606 case SMP_ROUTING_TABLE:
4607 case SMP_ROUTING_DIRECT:
4608 pptr->routing_method = SMP_ROUTING_DIRECT;
4609 break;
4610 default:
4611 pptr->routing_method = 0xff; /* Invalid method */
4612 break;
4613 }
4614 pmcs_update_phy_pm_props(pptr, (1ULL << pptr->phynum),
4615 (1ULL << sdr->sdr_attached_phy_identifier), B_TRUE);
4616 break;
4617 case SAS_IF_DTYPE_EDGE:
4618 case SAS_IF_DTYPE_FANOUT:
4619 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4620 "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS="
4621 SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x",
4622 pptr->path,
4623 sdr->sdr_attached_device_type,
4624 sdr->sdr_negotiated_logical_link_rate,
4625 ini_support,
4626 tgt_support,
4627 SAS_ADDR_PRT(sas_address),
4628 SAS_ADDR_PRT(att_sas_address),
4629 sdr->sdr_attached_phy_identifier);
4630 if (sdr->sdr_attached_smp_target) {
4631 /*
4632 * Avoid configuring phys that just point back
4633 * at a parent phy
4634 */
4635 if (expander->parent &&
4636 memcmp(expander->parent->sas_address,
4637 att_sas_address,
4638 sizeof (expander->parent->sas_address)) == 0) {
4639 pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, NULL,
4640 "%s: skipping port back to parent "
4641 "expander (%s)", __func__, pptr->path);
4642 pptr->dtype = NOTHING;
4643 break;
4644 }
4645 pptr->dtype = EXPANDER;
4646
4647 } else if (tgt_support || ini_support) {
4648 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4649 "%s has tgt support=%x init support=(%x)",
4650 pptr->path, tgt_support, ini_support);
4651 pptr->dtype = EXPANDER;
4652 }
4653 if (pptr->routing_attr == SMP_ROUTING_DIRECT) {
4654 pptr->routing_method = 0xff; /* Invalid method */
4655 } else {
4656 pptr->routing_method = pptr->routing_attr;
4657 }
4658 pmcs_update_phy_pm_props(pptr, (1ULL << pptr->phynum),
4659 (1ULL << sdr->sdr_attached_phy_identifier), B_TRUE);
4660 break;
4661 default:
4662 pptr->dtype = NOTHING;
4663 break;
4664 }
4665 if (pptr->dtype != NOTHING) {
4666 pmcs_phy_t *ctmp;
4667
4668 /*
4669 * If the attached device is a SATA device and the expander
4670 * is (possibly) a SAS2 compliant expander, check for whether
4671 * there is a NAA=5 WWN field starting at this offset and
4672 * use that for the SAS Address for this device.
4673 */
4674 if (expander->tolerates_sas2 && pptr->dtype == SATA &&
4675 (roff[SAS_ATTACHED_NAME_OFFSET] >> 8) == NAA_IEEE_REG) {
4676 (void) memcpy(pptr->sas_address,
4677 &roff[SAS_ATTACHED_NAME_OFFSET], 8);
4678 } else {
4679 (void) memcpy(pptr->sas_address, att_sas_address, 8);
4680 }
4681 pptr->atdt = (sdr->sdr_attached_device_type);
4682 /*
4683 * Now run up from the expander's parent up to the top to
4684 * make sure we only use the least common link_rate.
4685 */
4686 for (ctmp = expander->parent; ctmp; ctmp = ctmp->parent) {
4687 if (ctmp->link_rate <
4688 sdr->sdr_negotiated_logical_link_rate) {
4689 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, NULL,
4690 "%s: derating link rate from %x to %x due "
4691 "to %s being slower", pptr->path,
4692 sdr->sdr_negotiated_logical_link_rate,
4693 ctmp->link_rate,
4694 ctmp->path);
4695 sdr->sdr_negotiated_logical_link_rate =
4696 ctmp->link_rate;
4697 }
4698 }
4699 pptr->link_rate = sdr->sdr_negotiated_logical_link_rate;
4700 pptr->state.prog_min_rate = sdr->sdr_prog_min_phys_link_rate;
4701 pptr->state.hw_min_rate = sdr->sdr_hw_min_phys_link_rate;
4702 pptr->state.prog_max_rate = sdr->sdr_prog_max_phys_link_rate;
4703 pptr->state.hw_max_rate = sdr->sdr_hw_max_phys_link_rate;
4704 PHY_CHANGED(pwp, pptr);
4705 } else {
4706 pmcs_clear_phy(pwp, pptr);
4707 }
4708 result = 1;
4709 out:
4710 return (result);
4711 }
4712
4713 /*
4714 * Get a work structure and assign it a tag with type and serial number
4715 * If a structure is returned, it is returned locked.
4716 */
pmcwork_t *
pmcs_gwork(pmcs_hw_t *pwp, uint32_t tag_type, pmcs_phy_t *phyp)
{
	pmcwork_t *p;
	uint16_t snum;
	uint32_t off;

	mutex_enter(&pwp->wfree_lock);
	p = STAILQ_FIRST(&pwp->wf);
	if (p == NULL) {
		/*
		 * If we couldn't get a work structure, it's time to bite
		 * the bullet, grab the pfree_lock and copy over all the
		 * work structures from the pending free list to the actual
		 * free list (assuming it's not also empty).
		 */
		mutex_enter(&pwp->pfree_lock);
		if (STAILQ_FIRST(&pwp->pf) == NULL) {
			/* Both lists empty: caller gets nothing */
			mutex_exit(&pwp->pfree_lock);
			mutex_exit(&pwp->wfree_lock);
			return (NULL);
		}
		/*
		 * Transplant the entire pending-free list in O(1) by
		 * copying the STAILQ head/tail pointers; wf is known
		 * empty here, so no entries are lost.
		 */
		pwp->wf.stqh_first = pwp->pf.stqh_first;
		pwp->wf.stqh_last = pwp->pf.stqh_last;
		STAILQ_INIT(&pwp->pf);
		mutex_exit(&pwp->pfree_lock);

		p = STAILQ_FIRST(&pwp->wf);
		ASSERT(p != NULL);
	}
	STAILQ_REMOVE(&pwp->wf, p, pmcwork, next);
	/* Serial number is advanced while still under wfree_lock */
	snum = pwp->wserno++;
	mutex_exit(&pwp->wfree_lock);

	/* Index of this structure within the work array */
	off = p - pwp->work;

	mutex_enter(&p->lock);
	ASSERT(p->state == PMCS_WORK_STATE_NIL);
	ASSERT(p->htag == PMCS_TAG_FREE);
	/* Compose the tag: type field | serial number | array index */
	p->htag = (tag_type << PMCS_TAG_TYPE_SHIFT) & PMCS_TAG_TYPE_MASK;
	p->htag |= ((snum << PMCS_TAG_SERNO_SHIFT) & PMCS_TAG_SERNO_MASK);
	p->htag |= ((off << PMCS_TAG_INDEX_SHIFT) & PMCS_TAG_INDEX_MASK);
	p->start = gethrtime();
	p->state = PMCS_WORK_STATE_READY;
	p->ssp_event = 0;
	p->dead = 0;
	p->timer = 0;

	if (phyp) {
		p->phy = phyp;
		/* Hold a PHY reference for the lifetime of this work */
		pmcs_inc_phy_ref_count(phyp);
	}

	/* Returned with p->lock held */
	return (p);
}
4772
4773 /*
4774 * Called with pwrk lock held. Returned with lock released.
4775 */
4776 void
pmcs_pwork(pmcs_hw_t * pwp,pmcwork_t * p)4777 pmcs_pwork(pmcs_hw_t *pwp, pmcwork_t *p)
4778 {
4779 ASSERT(p != NULL);
4780 ASSERT(mutex_owned(&p->lock));
4781
4782 p->last_ptr = p->ptr;
4783 p->last_arg = p->arg;
4784 p->last_phy = p->phy;
4785 p->last_xp = p->xp;
4786 p->last_htag = p->htag;
4787 p->last_state = p->state;
4788 p->finish = gethrtime();
4789
4790 if (p->phy) {
4791 pmcs_dec_phy_ref_count(p->phy);
4792 }
4793
4794 p->state = PMCS_WORK_STATE_NIL;
4795 p->htag = PMCS_TAG_FREE;
4796 p->xp = NULL;
4797 p->ptr = NULL;
4798 p->arg = NULL;
4799 p->phy = NULL;
4800 p->abt_htag = 0;
4801 p->timer = 0;
4802 p->onwire = 0;
4803 p->ssp_event = 0;
4804 mutex_exit(&p->lock);
4805
4806 if (mutex_tryenter(&pwp->wfree_lock) == 0) {
4807 mutex_enter(&pwp->pfree_lock);
4808 STAILQ_INSERT_TAIL(&pwp->pf, p, next);
4809 mutex_exit(&pwp->pfree_lock);
4810 } else {
4811 STAILQ_INSERT_TAIL(&pwp->wf, p, next);
4812 mutex_exit(&pwp->wfree_lock);
4813 }
4814 }
4815
4816 /*
4817 * Find a work structure based upon a tag and make sure that the tag
4818 * serial number matches the work structure we've found.
4819 * If a structure is found, its lock is held upon return.
4820 * If lock_phy is B_TRUE, then lock the phy also when returning the work struct
4821 */
pmcwork_t *
pmcs_tag2wp(pmcs_hw_t *pwp, uint32_t htag, boolean_t lock_phy)
{
	pmcwork_t *p;
	pmcs_phy_t *phyp;
	/* The index bits of the tag locate the slot in the work array */
	uint32_t idx = PMCS_TAG_INDEX(htag);

	p = &pwp->work[idx];

	mutex_enter(&p->lock);
	/* Full-tag compare: serial-number bits reject a recycled slot */
	if (p->htag == htag) {
		if (lock_phy) {
			phyp = p->phy;
			if (phyp != NULL) {
				/* phy lock should be held before work lock */
				mutex_exit(&p->lock);
				mutex_enter(&phyp->phy_lock);
				mutex_enter(&p->lock);
			}
			/*
			 * Check htag again, in case the work got completed
			 * while we dropped the work lock and got the phy lock
			 */
			if (p->htag != htag) {
				if (phyp != NULL) {
					mutex_exit(&p->lock);
					mutex_exit(&phyp->phy_lock);
				}
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL, "%s: "
				    "HTAG (0x%x) found, but work (0x%p) "
				    "is already complete", __func__, htag,
				    (void *)p);
				return (NULL);
			}
		}
		/* Returned with p->lock (and phy lock, if requested) held */
		return (p);
	}
	mutex_exit(&p->lock);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "INDEX 0x%x HTAG 0x%x got p->htag 0x%x", idx, htag, p->htag);
	return (NULL);
}
4864
4865 /*
4866 * Issue an abort for a command or for all commands.
4867 *
4868 * Since this can be called from interrupt context,
4869 * we don't wait for completion if wait is not set.
4870 *
4871 * Called with PHY lock held.
4872 */
int
pmcs_abort(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint32_t tag, int all_cmds,
    int wait)
{
	pmcwork_t *pwrk;
	pmcs_xscsi_t *tgt;
	uint32_t msg[PMCS_MSG_SIZE], *ptr;
	int result, abt_type;
	uint32_t abt_htag, status;

	/* Only one ABORT_ALL may be outstanding per PHY at a time */
	if (pptr->abort_all_start) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, "%s: ABORT_ALL for "
		    "(%s) already in progress.", __func__, pptr->path);
		return (EBUSY);
	}

	/* Pick the abort opcode matching the device's protocol */
	switch (pptr->dtype) {
	case SAS:
		abt_type = PMCIN_SSP_ABORT;
		break;
	case SATA:
		abt_type = PMCIN_SATA_ABORT;
		break;
	case EXPANDER:
		abt_type = PMCIN_SMP_ABORT;
		break;
	default:
		/* Nothing to abort for other device types */
		return (0);
	}

	pwrk = pmcs_gwork(pwp, wait ? PMCS_TAG_TYPE_WAIT : PMCS_TAG_TYPE_NONE,
	    pptr);

	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}

	pwrk->dtype = pptr->dtype;
	pwrk->xp = pptr->target;
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	if (wait) {
		/* Completion copies the outbound IOMB back into msg */
		pwrk->arg = msg;
	}
	if (pptr->valid_device_id == 0) {
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: Invalid DeviceID", __func__);
		return (ENODEV);
	}
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, abt_type));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	if (all_cmds) {
		/* msg[4] == 1 requests an abort of all commands */
		msg[3] = 0;
		msg[4] = LE_32(1);
		pwrk->ptr = NULL;
		pwrk->abt_htag = PMCS_ABT_HTAG_ALL;
		pptr->abort_all_start = gethrtime();
	} else {
		/* Abort the single command identified by its tag */
		msg[3] = LE_32(tag);
		msg[4] = 0;
		pwrk->abt_htag = tag;
	}
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pptr->abort_all_start = 0;
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}

	COPY_MESSAGE(ptr, msg, 5);
	if (all_cmds) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: aborting all commands for %s device %s. (htag=0x%x)",
		    __func__, pmcs_get_typename(pptr->dtype), pptr->path,
		    msg[1]);
	} else {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: aborting tag 0x%x for %s device %s. (htag=0x%x)",
		    __func__, tag, pmcs_get_typename(pptr->dtype), pptr->path,
		    msg[1]);
	}
	pwrk->state = PMCS_WORK_STATE_ONCHIP;

	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (!wait) {
		/* Fire-and-forget (e.g. from interrupt context) */
		mutex_exit(&pwrk->lock);
		return (0);
	}

	abt_htag = pwrk->htag;
	/* Drop the PHY lock while waiting so completion can proceed */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, 1000, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);
	/* Re-fetch target: it may have changed while the lock was dropped */
	tgt = pptr->target;

	if (all_cmds) {
		pptr->abort_all_start = 0;
		cv_signal(&pptr->abort_all_cv);
	}

	if (result) {
		/* The abort itself timed out */
		if (all_cmds) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
			    "%s: Abort all request timed out", __func__);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
			    "%s: Abort (htag 0x%08x) request timed out",
			    __func__, abt_htag);
		}
		if (tgt != NULL) {
			mutex_enter(&tgt->statlock);
			if ((tgt->dev_state != PMCS_DEVICE_STATE_IN_RECOVERY) &&
			    (tgt->dev_state !=
			    PMCS_DEVICE_STATE_NON_OPERATIONAL)) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
				    "%s: Trying DS error recovery for tgt 0x%p",
				    __func__, (void *)tgt);
				(void) pmcs_send_err_recovery_cmd(pwp,
				    PMCS_DEVICE_STATE_IN_RECOVERY, pptr, tgt);
			}
			mutex_exit(&tgt->statlock);
		}
		return (ETIMEDOUT);
	}

	status = LE_32(msg[2]);
	if (status != PMCOUT_STATUS_OK) {
		/*
		 * The only non-success status are IO_NOT_VALID &
		 * IO_ABORT_IN_PROGRESS.
		 * In case of IO_ABORT_IN_PROGRESS, the other ABORT cmd's
		 * status is of concern and this duplicate cmd status can
		 * be ignored.
		 * If IO_NOT_VALID, that's not an error per-se.
		 * For abort of single I/O complete the command anyway.
		 * If, however, we were aborting all, that is a problem
		 * as IO_NOT_VALID really means that the IO or device is
		 * not there. So, discovery process will take of the cleanup.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
		    "%s: abort result 0x%x", __func__, LE_32(msg[2]));
		if (all_cmds) {
			PHY_CHANGED(pwp, pptr);
			RESTART_DISCOVERY(pwp);
		} else {
			return (EINVAL);
		}

		return (0);
	}

	/* Abort succeeded: restore OPERATIONAL state if we had started DS recovery */
	if (tgt != NULL) {
		mutex_enter(&tgt->statlock);
		if (tgt->dev_state == PMCS_DEVICE_STATE_IN_RECOVERY) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
			    "%s: Restoring OPERATIONAL dev_state for tgt 0x%p",
			    __func__, (void *)tgt);
			(void) pmcs_send_err_recovery_cmd(pwp,
			    PMCS_DEVICE_STATE_OPERATIONAL, pptr, tgt);
		}
		mutex_exit(&tgt->statlock);
	}

	return (0);
}
5044
5045 /*
5046 * Issue a task management function to an SSP device.
5047 *
5048 * Called with PHY lock held.
5049 * statlock CANNOT be held upon entry.
5050 */
int
pmcs_ssp_tmf(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint8_t tmf, uint32_t tag,
    uint64_t lun, uint32_t *response)
{
	int result, ds;
	uint8_t local[PMCS_QENTRY_SIZE << 1], *xd;
	sas_ssp_rsp_iu_t *rptr = (void *)local;
	/* Endian-transform vector for unpacking the SSP response IU */
	static const uint8_t ssp_rsp_evec[] = {
		0x58, 0x61, 0x56, 0x72, 0x00
	};
	uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
	struct pmcwork *pwrk;
	pmcs_xscsi_t *xp;

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nowrk, __func__);
		return (ENOMEM);
	}
	/*
	 * NB: We use the PMCS_OQ_GENERAL outbound queue
	 * NB: so as to not get entangled in normal I/O
	 * NB: processing.
	 */
	pwrk->htag |= PMCS_TAG_NONIO_CMD;
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
	    PMCIN_SSP_INI_TM_START));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	/* Only ABORT TASK / QUERY TASK name a specific task tag */
	if (tmf == SAS_ABORT_TASK || tmf == SAS_QUERY_TASK) {
		msg[3] = LE_32(tag);
	} else {
		msg[3] = 0;
	}
	msg[4] = LE_32(tmf);
	/* LUN is carried big-endian, split across two dwords */
	msg[5] = BE_32((uint32_t)lun);
	msg[6] = BE_32((uint32_t)(lun >> 32));
	msg[7] = LE_32(PMCIN_MESSAGE_REPORT);

	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_pwork(pwp, pwrk);
		pmcs_prt(pwp, PMCS_PRT_ERR, pptr, NULL, pmcs_nomsg, __func__);
		return (ENOMEM);
	}
	/*
	 * NOTE(review): only 7 dwords (msg[0..6]) are copied here although
	 * msg[7] is populated above -- confirm COPY_MESSAGE's length
	 * semantics / whether msg[7] is expected to reach the hardware.
	 */
	COPY_MESSAGE(ptr, msg, 7);
	pwrk->arg = msg;
	pwrk->dtype = pptr->dtype;
	xp = pptr->target;
	pwrk->xp = xp;

	if (xp != NULL) {
		mutex_enter(&xp->statlock);
		/* Don't bother sending a TMF to a non-operational device */
		if (xp->dev_state == PMCS_DEVICE_STATE_NON_OPERATIONAL) {
			mutex_exit(&xp->statlock);
			mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
			pmcs_pwork(pwp, pwrk);
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: Not "
			    "sending '%s' because DS is '%s'", __func__,
			    pmcs_tmf2str(tmf), pmcs_status_str
			    (PMCOUT_STATUS_IO_DS_NON_OPERATIONAL));
			return (EIO);
		}
		mutex_exit(&xp->statlock);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
	    "%s: sending '%s' to %s (lun %llu) tag 0x%x", __func__,
	    pmcs_tmf2str(tmf), pptr->path, (unsigned long long) lun, tag);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	pmcs_unlock_phy(pptr);
	/*
	 * This is a command sent to the target device, so it can take
	 * significant amount of time to complete when path & device is busy.
	 * Set a timeout to 20 seconds
	 */
	WAIT_FOR(pwrk, 20000, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);
	/* Re-fetch target: it may have changed while the lock was dropped */
	xp = pptr->target;

	if (result) {
		/* TMF timed out: kick off device state recovery */
		if (xp == NULL) {
			return (ETIMEDOUT);
		}

		mutex_enter(&xp->statlock);
		pmcs_start_dev_state_recovery(xp, pptr);
		mutex_exit(&xp->statlock);
		return (ETIMEDOUT);
	}

	status = LE_32(msg[2]);
	if (status != PMCOUT_STATUS_OK) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: status %s for TMF %s action to %s, lun %llu",
		    __func__, pmcs_status_str(status), pmcs_tmf2str(tmf),
		    pptr->path, (unsigned long long) lun);
		/* Map firmware status to the device state we should request */
		if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) ||
		    (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) ||
		    (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) {
			ds = PMCS_DEVICE_STATE_NON_OPERATIONAL;
		} else if (status == PMCOUT_STATUS_IO_DS_IN_RECOVERY) {
			/*
			 * If the status is IN_RECOVERY, it's an indication
			 * that it's now time for us to request to have the
			 * device state set to OPERATIONAL since we're the ones
			 * that requested recovery to begin with.
			 */
			ds = PMCS_DEVICE_STATE_OPERATIONAL;
		} else {
			ds = PMCS_DEVICE_STATE_IN_RECOVERY;
		}
		if (xp != NULL) {
			mutex_enter(&xp->statlock);
			if (xp->dev_state != ds) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
				    "%s: Sending err recovery cmd"
				    " for tgt 0x%p (status = %s)",
				    __func__, (void *)xp,
				    pmcs_status_str(status));
				(void) pmcs_send_err_recovery_cmd(pwp, ds,
				    pptr, xp);
			}
			mutex_exit(&xp->statlock);
		}
		return (EIO);
	} else {
		/* Firmware status OK: ensure the device is OPERATIONAL */
		ds = PMCS_DEVICE_STATE_OPERATIONAL;
		if (xp != NULL) {
			mutex_enter(&xp->statlock);
			if (xp->dev_state != ds) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
				    "%s: Sending err recovery cmd"
				    " for tgt 0x%p (status = %s)",
				    __func__, (void *)xp,
				    pmcs_status_str(status));
				(void) pmcs_send_err_recovery_cmd(pwp, ds,
				    pptr, xp);
			}
			mutex_exit(&xp->statlock);
		}
	}
	/* msg[3] is the returned response length; zero means no response IU */
	if (LE_32(msg[3]) == 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "TMF completed with no response");
		return (EIO);
	}
	/* Byte-swap the raw response IU into local[] for parsing */
	pmcs_endian_transform(pwp, local, &msg[5], ssp_rsp_evec);
	xd = (uint8_t *)(&msg[5]);
	xd += SAS_RSP_HDR_SIZE;
	if (rptr->datapres != SAS_RSP_DATAPRES_RESPONSE_DATA) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF response not RESPONSE DATA (0x%x)",
		    __func__, rptr->datapres);
		return (EIO);
	}
	/* TMF response data is always a single 4-byte response code */
	if (rptr->response_data_length != 4) {
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
		    "Bad SAS RESPONSE DATA LENGTH", msg);
		return (EIO);
	}
	(void) memcpy(&status, xd, sizeof (uint32_t));
	status = BE_32(status);
	if (response != NULL)
		*response = status;
	/*
	 * The status is actually in the low-order byte. The upper three
	 * bytes contain additional information for the TMFs that support them.
	 * However, at this time we do not issue any of those. In the other
	 * cases, the upper three bytes are supposed to be 0, but it appears
	 * they aren't always. Just mask them off.
	 */
	switch (status & 0xff) {
	case SAS_RSP_TMF_COMPLETE:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF complete", __func__);
		result = 0;
		break;
	case SAS_RSP_TMF_SUCCEEDED:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF succeeded", __func__);
		result = 0;
		break;
	case SAS_RSP_INVALID_FRAME:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned INVALID FRAME", __func__);
		result = EIO;
		break;
	case SAS_RSP_TMF_NOT_SUPPORTED:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned TMF NOT SUPPORTED", __func__);
		result = EIO;
		break;
	case SAS_RSP_TMF_FAILED:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned TMF FAILED", __func__);
		result = EIO;
		break;
	case SAS_RSP_TMF_INCORRECT_LUN:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned INCORRECT LUN", __func__);
		result = EIO;
		break;
	case SAS_RSP_OVERLAPPED_OIPTTA:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned OVERLAPPED INITIATOR PORT TRANSFER TAG "
		    "ATTEMPTED", __func__);
		result = EIO;
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: TMF returned unknown code 0x%x", __func__, status);
		result = EIO;
		break;
	}
	return (result);
}
5273
5274 /*
5275 * Called with PHY lock held and scratch acquired
5276 */
5277 int
pmcs_sata_abort_ncq(pmcs_hw_t * pwp,pmcs_phy_t * pptr)5278 pmcs_sata_abort_ncq(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
5279 {
5280 const char *utag_fail_fmt = "%s: untagged NCQ command failure";
5281 const char *tag_fail_fmt = "%s: NCQ command failure (tag 0x%x)";
5282 uint32_t msg[PMCS_QENTRY_SIZE], *ptr, result, status;
5283 uint8_t *fp = pwp->scratch, ds;
5284 fis_t fis;
5285 pmcwork_t *pwrk;
5286 pmcs_xscsi_t *tgt;
5287
5288 pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
5289 if (pwrk == NULL) {
5290 return (ENOMEM);
5291 }
5292 pwrk->htag |= PMCS_TAG_NONIO_CMD;
5293 msg[0] = LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE,
5294 PMCIN_SATA_HOST_IO_START));
5295 msg[1] = LE_32(pwrk->htag);
5296 msg[2] = LE_32(pptr->device_id);
5297 msg[3] = LE_32(512);
5298 msg[4] = LE_32(SATA_PROTOCOL_PIO | PMCIN_DATADIR_2_INI);
5299 msg[5] = LE_32((READ_LOG_EXT << 16) | (C_BIT << 8) | FIS_REG_H2DEV);
5300 msg[6] = LE_32(0x10);
5301 msg[8] = LE_32(1);
5302 msg[9] = 0;
5303 msg[10] = 0;
5304 msg[11] = 0;
5305 msg[12] = LE_32(DWORD0(pwp->scratch_dma));
5306 msg[13] = LE_32(DWORD1(pwp->scratch_dma));
5307 msg[14] = LE_32(512);
5308 msg[15] = 0;
5309
5310 pwrk->arg = msg;
5311 pwrk->dtype = pptr->dtype;
5312 pwrk->xp = pptr->target;
5313
5314 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
5315 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
5316 if (ptr == NULL) {
5317 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
5318 pmcs_pwork(pwp, pwrk);
5319 return (ENOMEM);
5320 }
5321 COPY_MESSAGE(ptr, msg, PMCS_QENTRY_SIZE);
5322 pwrk->state = PMCS_WORK_STATE_ONCHIP;
5323 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
5324
5325 pmcs_unlock_phy(pptr);
5326 WAIT_FOR(pwrk, 250, result);
5327 pmcs_pwork(pwp, pwrk);
5328 pmcs_lock_phy(pptr);
5329
5330 tgt = pptr->target;
5331 if (result) {
5332 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, pmcs_timeo, __func__);
5333 return (EIO);
5334 }
5335 status = LE_32(msg[2]);
5336 if (status != PMCOUT_STATUS_OK || LE_32(msg[3])) {
5337 if (tgt == NULL) {
5338 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
5339 "%s: cannot find target for phy 0x%p for "
5340 "dev state recovery", __func__, (void *)pptr);
5341 return (EIO);
5342 }
5343
5344 mutex_enter(&tgt->statlock);
5345
5346 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, "READ LOG EXT", msg);
5347 if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) ||
5348 (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) ||
5349 (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) {
5350 ds = PMCS_DEVICE_STATE_NON_OPERATIONAL;
5351 } else {
5352 ds = PMCS_DEVICE_STATE_IN_RECOVERY;
5353 }
5354 if (tgt->dev_state != ds) {
5355 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt, "%s: Trying "
5356 "SATA DS Recovery for tgt(0x%p) for status(%s)",
5357 __func__, (void *)tgt, pmcs_status_str(status));
5358 (void) pmcs_send_err_recovery_cmd(pwp, ds, pptr, tgt);
5359 }
5360
5361 mutex_exit(&tgt->statlock);
5362 return (EIO);
5363 }
5364 fis[0] = (fp[4] << 24) | (fp[3] << 16) | (fp[2] << 8) | FIS_REG_D2H;
5365 fis[1] = (fp[8] << 24) | (fp[7] << 16) | (fp[6] << 8) | fp[5];
5366 fis[2] = (fp[12] << 24) | (fp[11] << 16) | (fp[10] << 8) | fp[9];
5367 fis[3] = (fp[16] << 24) | (fp[15] << 16) | (fp[14] << 8) | fp[13];
5368 fis[4] = 0;
5369 if (fp[0] & 0x80) {
5370 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
5371 utag_fail_fmt, __func__);
5372 } else {
5373 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, tgt,
5374 tag_fail_fmt, __func__, fp[0] & 0x1f);
5375 }
5376 pmcs_fis_dump(pwp, fis);
5377 pptr->need_rl_ext = 0;
5378 return (0);
5379 }
5380
5381 /*
5382 * Transform a structure from CPU to Device endian format, or
5383 * vice versa, based upon a transformation vector.
5384 *
5385 * A transformation vector is an array of bytes, each byte
5386 * of which is defined thusly:
5387 *
5388 * bit 7: from CPU to desired endian, otherwise from desired endian
5389 * to CPU format
5390 * bit 6: Big Endian, else Little Endian
5391 * bits 5-4:
5392 * 00 Undefined
5393 * 01 One Byte quantities
5394 * 02 Two Byte quantities
5395 * 03 Four Byte quantities
5396 *
5397 * bits 3-0:
5398 * 00 Undefined
5399 * Number of quantities to transform
5400 *
5401 * The vector is terminated by a 0 value.
5402 */
5403
void
pmcs_endian_transform(pmcs_hw_t *pwp, void *orig_out, void *orig_in,
    const uint8_t *xfvec)
{
	uint8_t c, *out = orig_out, *in = orig_in;

	/* All three pointer arguments are required; bail on any NULL */
	if (xfvec == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: null xfvec", __func__);
		return;
	}
	if (out == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: null out", __func__);
		return;
	}
	if (in == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: null in", __func__);
		return;
	}
	/* Process descriptor bytes until the 0 terminator */
	while ((c = *xfvec++) != 0) {
		int nbyt = (c & 0xf);		/* count of quantities (1-15) */
		int size = (c >> 4) & 0x3;	/* bits 5-4: size code */
		int bige = (c >> 4) & 0x4;	/* bit 6: big endian if set */

		/*
		 * NOTE(review): bit 7 (the direction flag described in the
		 * block comment above) is never examined here.  Byte swaps
		 * are their own inverse, so the same code serves both
		 * directions.
		 */
		switch (size) {
		case 1:
		{
			/* One-byte quantities: straight copy, no swapping */
			while (nbyt-- > 0) {
				*out++ = *in++;
			}
			break;
		}
		case 2:
		{
			/* Two-byte quantities: memcpy avoids misaligned access */
			uint16_t tmp;
			while (nbyt-- > 0) {
				(void) memcpy(&tmp, in, sizeof (uint16_t));
				if (bige) {
					tmp = BE_16(tmp);
				} else {
					tmp = LE_16(tmp);
				}
				(void) memcpy(out, &tmp, sizeof (uint16_t));
				out += sizeof (uint16_t);
				in += sizeof (uint16_t);
			}
			break;
		}
		case 3:
		{
			/* Four-byte quantities */
			uint32_t tmp;
			while (nbyt-- > 0) {
				(void) memcpy(&tmp, in, sizeof (uint32_t));
				if (bige) {
					tmp = BE_32(tmp);
				} else {
					tmp = LE_32(tmp);
				}
				(void) memcpy(out, &tmp, sizeof (uint32_t));
				out += sizeof (uint32_t);
				in += sizeof (uint32_t);
			}
			break;
		}
		default:
			/* Size code 0 is undefined; abandon the transform */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: bad size", __func__);
			return;
		}
	}
}
5477
5478 const char *
pmcs_get_rate(unsigned int linkrt)5479 pmcs_get_rate(unsigned int linkrt)
5480 {
5481 const char *rate;
5482 switch (linkrt) {
5483 case SAS_LINK_RATE_1_5GBIT:
5484 rate = "1.5";
5485 break;
5486 case SAS_LINK_RATE_3GBIT:
5487 rate = "3.0";
5488 break;
5489 case SAS_LINK_RATE_6GBIT:
5490 rate = "6.0";
5491 break;
5492 default:
5493 rate = "???";
5494 break;
5495 }
5496 return (rate);
5497 }
5498
5499 const char *
pmcs_get_typename(pmcs_dtype_t type)5500 pmcs_get_typename(pmcs_dtype_t type)
5501 {
5502 switch (type) {
5503 case NOTHING:
5504 return ("NIL");
5505 case SATA:
5506 return ("SATA");
5507 case SAS:
5508 return ("SSP");
5509 case EXPANDER:
5510 return ("EXPANDER");
5511 }
5512 return ("????");
5513 }
5514
5515 const char *
pmcs_tmf2str(int tmf)5516 pmcs_tmf2str(int tmf)
5517 {
5518 switch (tmf) {
5519 case SAS_ABORT_TASK:
5520 return ("Abort Task");
5521 case SAS_ABORT_TASK_SET:
5522 return ("Abort Task Set");
5523 case SAS_CLEAR_TASK_SET:
5524 return ("Clear Task Set");
5525 case SAS_LOGICAL_UNIT_RESET:
5526 return ("Logical Unit Reset");
5527 case SAS_I_T_NEXUS_RESET:
5528 return ("I_T Nexus Reset");
5529 case SAS_CLEAR_ACA:
5530 return ("Clear ACA");
5531 case SAS_QUERY_TASK:
5532 return ("Query Task");
5533 case SAS_QUERY_TASK_SET:
5534 return ("Query Task Set");
5535 case SAS_QUERY_UNIT_ATTENTION:
5536 return ("Query Unit Attention");
5537 default:
5538 return ("Unknown");
5539 }
5540 }
5541
5542 const char *
pmcs_status_str(uint32_t status)5543 pmcs_status_str(uint32_t status)
5544 {
5545 switch (status) {
5546 case PMCOUT_STATUS_OK:
5547 return ("OK");
5548 case PMCOUT_STATUS_ABORTED:
5549 return ("ABORTED");
5550 case PMCOUT_STATUS_OVERFLOW:
5551 return ("OVERFLOW");
5552 case PMCOUT_STATUS_UNDERFLOW:
5553 return ("UNDERFLOW");
5554 case PMCOUT_STATUS_FAILED:
5555 return ("FAILED");
5556 case PMCOUT_STATUS_ABORT_RESET:
5557 return ("ABORT_RESET");
5558 case PMCOUT_STATUS_IO_NOT_VALID:
5559 return ("IO_NOT_VALID");
5560 case PMCOUT_STATUS_NO_DEVICE:
5561 return ("NO_DEVICE");
5562 case PMCOUT_STATUS_ILLEGAL_PARAMETER:
5563 return ("ILLEGAL_PARAMETER");
5564 case PMCOUT_STATUS_LINK_FAILURE:
5565 return ("LINK_FAILURE");
5566 case PMCOUT_STATUS_PROG_ERROR:
5567 return ("PROG_ERROR");
5568 case PMCOUT_STATUS_EDC_IN_ERROR:
5569 return ("EDC_IN_ERROR");
5570 case PMCOUT_STATUS_EDC_OUT_ERROR:
5571 return ("EDC_OUT_ERROR");
5572 case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
5573 return ("ERROR_HW_TIMEOUT");
5574 case PMCOUT_STATUS_XFER_ERR_BREAK:
5575 return ("XFER_ERR_BREAK");
5576 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
5577 return ("XFER_ERR_PHY_NOT_READY");
5578 case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
5579 return ("OPEN_CNX_PROTOCOL_NOT_SUPPORTED");
5580 case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
5581 return ("OPEN_CNX_ERROR_ZONE_VIOLATION");
5582 case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
5583 return ("OPEN_CNX_ERROR_BREAK");
5584 case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
5585 return ("OPEN_CNX_ERROR_IT_NEXUS_LOSS");
5586 case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
5587 return ("OPENCNX_ERROR_BAD_DESTINATION");
5588 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
5589 return ("OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED");
5590 case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
5591 return ("OPEN_CNX_ERROR_STP_RESOURCES_BUSY");
5592 case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
5593 return ("OPEN_CNX_ERROR_WRONG_DESTINATION");
5594 case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR:
5595 return ("OPEN_CNX_ERROR_UNKNOWN_ERROR");
5596 case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
5597 return ("IO_XFER_ERROR_NAK_RECEIVED");
5598 case PMCOUT_STATUS_XFER_ERROR_ACK_NAK_TIMEOUT:
5599 return ("XFER_ERROR_ACK_NAK_TIMEOUT");
5600 case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED:
5601 return ("XFER_ERROR_PEER_ABORTED");
5602 case PMCOUT_STATUS_XFER_ERROR_RX_FRAME:
5603 return ("XFER_ERROR_RX_FRAME");
5604 case PMCOUT_STATUS_IO_XFER_ERROR_DMA:
5605 return ("IO_XFER_ERROR_DMA");
5606 case PMCOUT_STATUS_XFER_ERROR_CREDIT_TIMEOUT:
5607 return ("XFER_ERROR_CREDIT_TIMEOUT");
5608 case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT:
5609 return ("XFER_ERROR_SATA_LINK_TIMEOUT");
5610 case PMCOUT_STATUS_XFER_ERROR_SATA:
5611 return ("XFER_ERROR_SATA");
5612 case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE:
5613 return ("XFER_ERROR_REJECTED_NCQ_MODE");
5614 case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST:
5615 return ("XFER_ERROR_ABORTED_DUE_TO_SRST");
5616 case PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE:
5617 return ("XFER_ERROR_ABORTED_NCQ_MODE");
5618 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
5619 return ("IO_XFER_OPEN_RETRY_TIMEOUT");
5620 case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
5621 return ("SMP_RESP_CONNECTION_ERROR");
5622 case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE:
5623 return ("XFER_ERROR_UNEXPECTED_PHASE");
5624 case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN:
5625 return ("XFER_ERROR_RDY_OVERRUN");
5626 case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED:
5627 return ("XFER_ERROR_RDY_NOT_EXPECTED");
5628 case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
5629 return ("XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT");
5630 case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK:
5631 return ("XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK");
5632 case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK:
5633 return ("XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK");
5634 case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH:
5635 return ("XFER_ERROR_OFFSET_MISMATCH");
5636 case PMCOUT_STATUS_XFER_ERROR_ZERO_DATA_LEN:
5637 return ("XFER_ERROR_ZERO_DATA_LEN");
5638 case PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED:
5639 return ("XFER_CMD_FRAME_ISSUED");
5640 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
5641 return ("ERROR_INTERNAL_SMP_RESOURCE");
5642 case PMCOUT_STATUS_IO_PORT_IN_RESET:
5643 return ("IO_PORT_IN_RESET");
5644 case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
5645 return ("DEVICE STATE NON-OPERATIONAL");
5646 case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
5647 return ("DEVICE STATE IN RECOVERY");
5648 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
5649 return ("OPEN CNX ERR HW RESOURCE BUSY");
5650 default:
5651 return (NULL);
5652 }
5653 }
5654
/*
 * Pack an 8-byte big-endian SAS address array into a 64-bit WWN.
 */
uint64_t
pmcs_barray2wwn(uint8_t ba[8])
{
	const uint8_t *bp;
	uint64_t wwn = 0;

	for (bp = ba; bp < ba + 8; bp++) {
		wwn = (wwn << 8) | *bp;
	}
	return (wwn);
}
5667
/*
 * Unpack a 64-bit WWN into an 8-byte big-endian SAS address array.
 */
void
pmcs_wwn2barray(uint64_t wwn, uint8_t ba[8])
{
	int shift, idx = 0;

	for (shift = 56; shift >= 0; shift -= 8) {
		ba[idx++] = (uint8_t)(wwn >> shift);
	}
}
5677
5678 void
pmcs_report_fwversion(pmcs_hw_t * pwp)5679 pmcs_report_fwversion(pmcs_hw_t *pwp)
5680 {
5681 const char *fwsupport;
5682 switch (PMCS_FW_TYPE(pwp)) {
5683 case PMCS_FW_TYPE_RELEASED:
5684 fwsupport = "Released";
5685 break;
5686 case PMCS_FW_TYPE_DEVELOPMENT:
5687 fwsupport = "Development";
5688 break;
5689 case PMCS_FW_TYPE_ALPHA:
5690 fwsupport = "Alpha";
5691 break;
5692 case PMCS_FW_TYPE_BETA:
5693 fwsupport = "Beta";
5694 break;
5695 default:
5696 fwsupport = "Special";
5697 break;
5698 }
5699 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
5700 "Chip Revision: %c; F/W Revision %x.%x.%x %s (ILA rev %08x)",
5701 'A' + pwp->chiprev, PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp),
5702 PMCS_FW_MICRO(pwp), fwsupport, pwp->ila_ver);
5703 }
5704
5705 void
pmcs_phy_name(pmcs_hw_t * pwp,pmcs_phy_t * pptr,char * obuf,size_t olen)5706 pmcs_phy_name(pmcs_hw_t *pwp, pmcs_phy_t *pptr, char *obuf, size_t olen)
5707 {
5708 if (pptr->parent) {
5709 pmcs_phy_name(pwp, pptr->parent, obuf, olen);
5710 (void) snprintf(obuf, olen, "%s.%02x", obuf, pptr->phynum);
5711 } else {
5712 (void) snprintf(obuf, olen, "pp%02x", pptr->phynum);
5713 }
5714 }
5715
5716 /*
5717 * This function is called as a sanity check to ensure that a newly registered
5718 * PHY doesn't have a device_id that exists with another registered PHY.
5719 */
5720 static boolean_t
pmcs_validate_devid(pmcs_phy_t * parent,pmcs_phy_t * phyp,uint32_t device_id)5721 pmcs_validate_devid(pmcs_phy_t *parent, pmcs_phy_t *phyp, uint32_t device_id)
5722 {
5723 pmcs_phy_t *pptr, *pchild;
5724 boolean_t rval;
5725
5726 pptr = parent;
5727
5728 while (pptr) {
5729 if (pptr->valid_device_id && (pptr != phyp) &&
5730 (pptr->device_id == device_id)) {
5731 /*
5732 * This can still be OK if both of these PHYs actually
5733 * represent the same device (e.g. expander). It could
5734 * be a case of a new "primary" PHY. If the SAS address
5735 * is the same and they have the same parent, we'll
5736 * accept this if the PHY to be registered is the
5737 * primary.
5738 */
5739 if ((phyp->parent == pptr->parent) &&
5740 (memcmp(phyp->sas_address,
5741 pptr->sas_address, 8) == 0) && (phyp->width > 1)) {
5742 /*
5743 * Move children over to the new primary and
5744 * update both PHYs
5745 */
5746 pmcs_lock_phy(pptr);
5747 phyp->children = pptr->children;
5748 pchild = phyp->children;
5749 while (pchild) {
5750 pchild->parent = phyp;
5751 pchild = pchild->sibling;
5752 }
5753 phyp->subsidiary = 0;
5754 phyp->ncphy = pptr->ncphy;
5755 /*
5756 * device_id, valid_device_id, and configured
5757 * will be set by the caller
5758 */
5759 pptr->children = NULL;
5760 pptr->subsidiary = 1;
5761 pptr->ncphy = 0;
5762 pmcs_unlock_phy(pptr);
5763 pmcs_prt(pptr->pwp, PMCS_PRT_DEBUG, pptr, NULL,
5764 "%s: Moving device_id %d from PHY %s to %s",
5765 __func__, device_id, pptr->path,
5766 phyp->path);
5767 return (B_TRUE);
5768 }
5769 pmcs_prt(pptr->pwp, PMCS_PRT_DEBUG, pptr, NULL,
5770 "%s: phy %s already exists as %s with "
5771 "device id 0x%x", __func__, phyp->path,
5772 pptr->path, device_id);
5773 return (B_FALSE);
5774 }
5775
5776 if (pptr->children) {
5777 rval = pmcs_validate_devid(pptr->children, phyp,
5778 device_id);
5779 if (rval == B_FALSE) {
5780 return (rval);
5781 }
5782 }
5783
5784 pptr = pptr->sibling;
5785 }
5786
5787 /* This PHY and device_id are valid */
5788 return (B_TRUE);
5789 }
5790
5791 /*
5792 * If the PHY is found, it is returned locked
5793 */
5794 static pmcs_phy_t *
pmcs_find_phy_by_wwn_impl(pmcs_phy_t * phyp,uint8_t * wwn)5795 pmcs_find_phy_by_wwn_impl(pmcs_phy_t *phyp, uint8_t *wwn)
5796 {
5797 pmcs_phy_t *matched_phy, *cphyp, *nphyp;
5798
5799 ASSERT(!mutex_owned(&phyp->phy_lock));
5800
5801 while (phyp) {
5802 pmcs_lock_phy(phyp);
5803
5804 if (phyp->valid_device_id) {
5805 if (memcmp(phyp->sas_address, wwn, 8) == 0) {
5806 return (phyp);
5807 }
5808 }
5809
5810 if (phyp->children) {
5811 cphyp = phyp->children;
5812 pmcs_unlock_phy(phyp);
5813 matched_phy = pmcs_find_phy_by_wwn_impl(cphyp, wwn);
5814 if (matched_phy) {
5815 ASSERT(mutex_owned(&matched_phy->phy_lock));
5816 return (matched_phy);
5817 }
5818 pmcs_lock_phy(phyp);
5819 }
5820
5821 /*
5822 * Only iterate through non-root PHYs
5823 */
5824 if (IS_ROOT_PHY(phyp)) {
5825 pmcs_unlock_phy(phyp);
5826 phyp = NULL;
5827 } else {
5828 nphyp = phyp->sibling;
5829 pmcs_unlock_phy(phyp);
5830 phyp = nphyp;
5831 }
5832 }
5833
5834 return (NULL);
5835 }
5836
5837 pmcs_phy_t *
pmcs_find_phy_by_wwn(pmcs_hw_t * pwp,uint64_t wwn)5838 pmcs_find_phy_by_wwn(pmcs_hw_t *pwp, uint64_t wwn)
5839 {
5840 uint8_t ebstr[8];
5841 pmcs_phy_t *pptr, *matched_phy;
5842
5843 pmcs_wwn2barray(wwn, ebstr);
5844
5845 pptr = pwp->root_phys;
5846 while (pptr) {
5847 matched_phy = pmcs_find_phy_by_wwn_impl(pptr, ebstr);
5848 if (matched_phy) {
5849 ASSERT(mutex_owned(&matched_phy->phy_lock));
5850 return (matched_phy);
5851 }
5852
5853 pptr = pptr->sibling;
5854 }
5855
5856 return (NULL);
5857 }
5858
5859
5860 /*
5861 * pmcs_find_phy_by_sas_address
5862 *
5863 * Find a PHY that both matches "sas_addr" and is on "iport".
5864 * If a matching PHY is found, it is returned locked.
5865 */
5866 pmcs_phy_t *
pmcs_find_phy_by_sas_address(pmcs_hw_t * pwp,pmcs_iport_t * iport,pmcs_phy_t * root,char * sas_addr)5867 pmcs_find_phy_by_sas_address(pmcs_hw_t *pwp, pmcs_iport_t *iport,
5868 pmcs_phy_t *root, char *sas_addr)
5869 {
5870 int ua_form = 1;
5871 uint64_t wwn;
5872 char addr[PMCS_MAX_UA_SIZE];
5873 pmcs_phy_t *pptr, *pnext, *pchild;
5874
5875 if (root == NULL) {
5876 pptr = pwp->root_phys;
5877 } else {
5878 pptr = root;
5879 }
5880
5881 while (pptr) {
5882 pmcs_lock_phy(pptr);
5883 /*
5884 * If the PHY is dead or does not have a valid device ID,
5885 * skip it.
5886 */
5887 if ((pptr->dead) || (!pptr->valid_device_id)) {
5888 goto next_phy;
5889 }
5890
5891 if (pptr->iport != iport) {
5892 goto next_phy;
5893 }
5894
5895 wwn = pmcs_barray2wwn(pptr->sas_address);
5896 (void *) scsi_wwn_to_wwnstr(wwn, ua_form, addr);
5897 if (strncmp(addr, sas_addr, strlen(addr)) == 0) {
5898 return (pptr);
5899 }
5900
5901 if (pptr->children) {
5902 pchild = pptr->children;
5903 pmcs_unlock_phy(pptr);
5904 pnext = pmcs_find_phy_by_sas_address(pwp, iport, pchild,
5905 sas_addr);
5906 if (pnext) {
5907 return (pnext);
5908 }
5909 pmcs_lock_phy(pptr);
5910 }
5911
5912 next_phy:
5913 pnext = pptr->sibling;
5914 pmcs_unlock_phy(pptr);
5915 pptr = pnext;
5916 }
5917
5918 return (NULL);
5919 }
5920
/*
 * Decode a FIS (as five 32-bit words) and log it at INFO level.
 * The low byte of fis[0] selects the FIS type; register H2D and D2H
 * FISes get field-level decoding, anything else is dumped raw.
 */
void
pmcs_fis_dump(pmcs_hw_t *pwp, fis_t fis)
{
	switch (fis[0] & 0xff) {
	case FIS_REG_H2DEV:
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
		    "FIS REGISTER HOST TO DEVICE: "
		    "OP=0x%02x Feature=0x%04x Count=0x%04x Device=0x%02x "
		    "LBA=%llu", BYTE2(fis[0]), BYTE3(fis[2]) << 8 |
		    BYTE3(fis[0]), WORD0(fis[3]), BYTE3(fis[1]),
		    (unsigned long long)
		    (((uint64_t)fis[2] & 0x00ffffff) << 24 |
		    ((uint64_t)fis[1] & 0x00ffffff)));
		break;
	case FIS_REG_D2H:
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
		    "FIS REGISTER DEVICE TO HOST: Status=0x%02x "
		    "Error=0x%02x Dev=0x%02x Count=0x%04x LBA=%llu",
		    BYTE2(fis[0]), BYTE3(fis[0]), BYTE3(fis[1]), WORD0(fis[3]),
		    (unsigned long long)(((uint64_t)fis[2] & 0x00ffffff) << 24 |
		    ((uint64_t)fis[1] & 0x00ffffff)));
		break;
	default:
		/* Unknown FIS type: dump the raw words */
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL,
		    "FIS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x",
		    fis[0], fis[1], fis[2], fis[3], fis[4]);
		break;
	}
}
5950
5951 void
pmcs_print_entry(pmcs_hw_t * pwp,int level,char * msg,void * arg)5952 pmcs_print_entry(pmcs_hw_t *pwp, int level, char *msg, void *arg)
5953 {
5954 uint32_t *mb = arg;
5955 size_t i;
5956
5957 pmcs_prt(pwp, level, NULL, NULL, msg);
5958 for (i = 0; i < (PMCS_QENTRY_SIZE / sizeof (uint32_t)); i += 4) {
5959 pmcs_prt(pwp, level, NULL, NULL,
5960 "Offset %2lu: 0x%08x 0x%08x 0x%08x 0x%08x",
5961 i * sizeof (uint32_t), LE_32(mb[i]),
5962 LE_32(mb[i+1]), LE_32(mb[i+2]), LE_32(mb[i+3]));
5963 }
5964 }
5965
5966 /*
5967 * If phyp == NULL we're being called from the worker thread, in which
5968 * case we need to check all the PHYs. In this case, the softstate lock
5969 * will be held.
5970 * If phyp is non-NULL, just issue the spinup release for the specified PHY
5971 * (which will already be locked).
5972 */
5973 void
pmcs_spinup_release(pmcs_hw_t * pwp,pmcs_phy_t * phyp)5974 pmcs_spinup_release(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
5975 {
5976 uint32_t *msg;
5977 struct pmcwork *pwrk;
5978 pmcs_phy_t *tphyp;
5979
5980 if (phyp != NULL) {
5981 ASSERT(mutex_owned(&phyp->phy_lock));
5982 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL,
5983 "%s: Issuing spinup release only for PHY %s", __func__,
5984 phyp->path);
5985 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
5986 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
5987 if (msg == NULL || (pwrk =
5988 pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) {
5989 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
5990 SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE);
5991 return;
5992 }
5993
5994 phyp->spinup_hold = 0;
5995 bzero(msg, PMCS_QENTRY_SIZE);
5996 pwrk->htag |= PMCS_TAG_NONIO_CMD;
5997 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
5998 PMCIN_LOCAL_PHY_CONTROL));
5999 msg[1] = LE_32(pwrk->htag);
6000 msg[2] = LE_32((0x10 << 8) | phyp->phynum);
6001
6002 pwrk->dtype = phyp->dtype;
6003 pwrk->state = PMCS_WORK_STATE_ONCHIP;
6004 pwrk->xp = phyp->target;
6005 mutex_exit(&pwrk->lock);
6006 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
6007 return;
6008 }
6009
6010 ASSERT(mutex_owned(&pwp->lock));
6011
6012 tphyp = pwp->root_phys;
6013 while (tphyp) {
6014 pmcs_lock_phy(tphyp);
6015 if (tphyp->spinup_hold == 0) {
6016 pmcs_unlock_phy(tphyp);
6017 tphyp = tphyp->sibling;
6018 continue;
6019 }
6020
6021 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, tphyp, NULL,
6022 "%s: Issuing spinup release for PHY %s", __func__,
6023 tphyp->path);
6024
6025 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
6026 msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
6027 if (msg == NULL || (pwrk =
6028 pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) {
6029 pmcs_unlock_phy(tphyp);
6030 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
6031 SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE);
6032 break;
6033 }
6034
6035 tphyp->spinup_hold = 0;
6036 bzero(msg, PMCS_QENTRY_SIZE);
6037 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
6038 PMCIN_LOCAL_PHY_CONTROL));
6039 msg[1] = LE_32(pwrk->htag);
6040 msg[2] = LE_32((0x10 << 8) | tphyp->phynum);
6041
6042 pwrk->dtype = tphyp->dtype;
6043 pwrk->state = PMCS_WORK_STATE_ONCHIP;
6044 pwrk->xp = tphyp->target;
6045 mutex_exit(&pwrk->lock);
6046 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
6047 pmcs_unlock_phy(tphyp);
6048
6049 tphyp = tphyp->sibling;
6050 }
6051 }
6052
6053 /*
6054 * Abort commands on dead PHYs and deregister them as well as removing
6055 * the associated targets.
6056 */
6057 static int
pmcs_kill_devices(pmcs_hw_t * pwp,pmcs_phy_t * phyp)6058 pmcs_kill_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
6059 {
6060 pmcs_phy_t *pnext, *pchild;
6061 boolean_t remove_device;
6062 int rval = 0;
6063
6064 while (phyp) {
6065 pmcs_lock_phy(phyp);
6066 pchild = phyp->children;
6067 pnext = phyp->sibling;
6068 pmcs_unlock_phy(phyp);
6069
6070 if (pchild) {
6071 rval = pmcs_kill_devices(pwp, pchild);
6072 if (rval) {
6073 return (rval);
6074 }
6075 }
6076
6077 mutex_enter(&pwp->lock);
6078 pmcs_lock_phy(phyp);
6079 if (phyp->dead && phyp->valid_device_id) {
6080 remove_device = B_TRUE;
6081 } else {
6082 remove_device = B_FALSE;
6083 }
6084
6085 if (remove_device) {
6086 pmcs_remove_device(pwp, phyp);
6087 mutex_exit(&pwp->lock);
6088
6089 rval = pmcs_kill_device(pwp, phyp);
6090 if (rval) {
6091 pmcs_unlock_phy(phyp);
6092 return (rval);
6093 }
6094 } else {
6095 mutex_exit(&pwp->lock);
6096 }
6097
6098 pmcs_unlock_phy(phyp);
6099 phyp = pnext;
6100 }
6101
6102 return (rval);
6103 }
6104
6105 /*
6106 * Called with PHY locked
6107 */
6108 int
pmcs_kill_device(pmcs_hw_t * pwp,pmcs_phy_t * pptr)6109 pmcs_kill_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
6110 {
6111 int rval;
6112
6113 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL, "kill %s device @ %s",
6114 pmcs_get_typename(pptr->dtype), pptr->path);
6115
6116 /*
6117 * There may be an outstanding ABORT_ALL running, which we wouldn't
6118 * know just by checking abort_pending. We can, however, check
6119 * abort_all_start. If it's non-zero, there is one, and we'll just
6120 * sit here and wait for it to complete. If we don't, we'll remove
6121 * the device while there are still commands pending.
6122 */
6123 if (pptr->abort_all_start) {
6124 while (pptr->abort_all_start) {
6125 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
6126 "%s: Waiting for outstanding ABORT_ALL on PHY 0x%p",
6127 __func__, (void *)pptr);
6128 cv_wait(&pptr->abort_all_cv, &pptr->phy_lock);
6129 }
6130 } else if (pptr->abort_pending) {
6131 rval = pmcs_abort(pwp, pptr, pptr->device_id, 1, 1);
6132 if (rval) {
6133 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
6134 "%s: ABORT_ALL returned non-zero status (%d) for "
6135 "PHY 0x%p", __func__, rval, (void *)pptr);
6136 return (rval);
6137 }
6138 pptr->abort_pending = 0;
6139 }
6140
6141 if (pptr->valid_device_id) {
6142 pmcs_deregister_device(pwp, pptr);
6143 }
6144
6145 PHY_CHANGED(pwp, pptr);
6146 RESTART_DISCOVERY(pwp);
6147 pptr->valid_device_id = 0;
6148 return (0);
6149 }
6150
6151 /*
6152 * Acknowledge the SAS h/w events that need acknowledgement.
6153 * This is only needed for first level PHYs.
6154 */
6155 void
pmcs_ack_events(pmcs_hw_t * pwp)6156 pmcs_ack_events(pmcs_hw_t *pwp)
6157 {
6158 uint32_t msg[PMCS_MSG_SIZE], *ptr;
6159 struct pmcwork *pwrk;
6160 pmcs_phy_t *pptr;
6161
6162 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
6163 pmcs_lock_phy(pptr);
6164 if (pptr->hw_event_ack == 0) {
6165 pmcs_unlock_phy(pptr);
6166 continue;
6167 }
6168 mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
6169 ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
6170
6171 if ((ptr == NULL) || (pwrk =
6172 pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) {
6173 mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
6174 pmcs_unlock_phy(pptr);
6175 SCHEDULE_WORK(pwp, PMCS_WORK_SAS_HW_ACK);
6176 break;
6177 }
6178
6179 msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
6180 PMCIN_SAS_HW_EVENT_ACK));
6181 msg[1] = LE_32(pwrk->htag);
6182 msg[2] = LE_32(pptr->hw_event_ack);
6183
6184 mutex_exit(&pwrk->lock);
6185 pwrk->dtype = pptr->dtype;
6186 pptr->hw_event_ack = 0;
6187 COPY_MESSAGE(ptr, msg, 3);
6188 INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
6189 pmcs_unlock_phy(pptr);
6190 }
6191 }
6192
6193 /*
6194 * Load DMA
6195 */
6196 int
pmcs_dma_load(pmcs_hw_t * pwp,pmcs_cmd_t * sp,uint32_t * msg)6197 pmcs_dma_load(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint32_t *msg)
6198 {
6199 ddi_dma_cookie_t *sg;
6200 pmcs_dmachunk_t *tc;
6201 pmcs_dmasgl_t *sgl, *prior;
6202 int seg, tsc;
6203 uint64_t sgl_addr;
6204
6205 /*
6206 * If we have no data segments, we're done.
6207 */
6208 if (CMD2PKT(sp)->pkt_numcookies == 0) {
6209 return (0);
6210 }
6211
6212 /*
6213 * Get the S/G list pointer.
6214 */
6215 sg = CMD2PKT(sp)->pkt_cookies;
6216
6217 /*
6218 * If we only have one dma segment, we can directly address that
6219 * data within the Inbound message itself.
6220 */
6221 if (CMD2PKT(sp)->pkt_numcookies == 1) {
6222 msg[12] = LE_32(DWORD0(sg->dmac_laddress));
6223 msg[13] = LE_32(DWORD1(sg->dmac_laddress));
6224 msg[14] = LE_32(sg->dmac_size);
6225 msg[15] = 0;
6226 return (0);
6227 }
6228
6229 /*
6230 * Otherwise, we'll need one or more external S/G list chunks.
6231 * Get the first one and its dma address into the Inbound message.
6232 */
6233 mutex_enter(&pwp->dma_lock);
6234 tc = pwp->dma_freelist;
6235 if (tc == NULL) {
6236 SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
6237 mutex_exit(&pwp->dma_lock);
6238 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
6239 "%s: out of SG lists", __func__);
6240 return (-1);
6241 }
6242 pwp->dma_freelist = tc->nxt;
6243 mutex_exit(&pwp->dma_lock);
6244
6245 tc->nxt = NULL;
6246 sp->cmd_clist = tc;
6247 sgl = tc->chunks;
6248 (void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ);
6249 sgl_addr = tc->addr;
6250 msg[12] = LE_32(DWORD0(sgl_addr));
6251 msg[13] = LE_32(DWORD1(sgl_addr));
6252 msg[14] = 0;
6253 msg[15] = LE_32(PMCS_DMASGL_EXTENSION);
6254
6255 prior = sgl;
6256 tsc = 0;
6257
6258 for (seg = 0; seg < CMD2PKT(sp)->pkt_numcookies; seg++) {
6259 /*
6260 * If the current segment count for this chunk is one less than
6261 * the number s/g lists per chunk and we have more than one seg
6262 * to go, we need another chunk. Get it, and make sure that the
6263 * tail end of the the previous chunk points the new chunk
6264 * (if remembering an offset can be called 'pointing to').
6265 *
6266 * Note that we can store the offset into our command area that
6267 * represents the new chunk in the length field of the part
6268 * that points the PMC chip at the next chunk- the PMC chip
6269 * ignores this field when the EXTENSION bit is set.
6270 *
6271 * This is required for dma unloads later.
6272 */
6273 if (tsc == (PMCS_SGL_NCHUNKS - 1) &&
6274 seg < (CMD2PKT(sp)->pkt_numcookies - 1)) {
6275 mutex_enter(&pwp->dma_lock);
6276 tc = pwp->dma_freelist;
6277 if (tc == NULL) {
6278 SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
6279 mutex_exit(&pwp->dma_lock);
6280 pmcs_dma_unload(pwp, sp);
6281 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
6282 "%s: out of SG lists", __func__);
6283 return (-1);
6284 }
6285 pwp->dma_freelist = tc->nxt;
6286 tc->nxt = sp->cmd_clist;
6287 mutex_exit(&pwp->dma_lock);
6288
6289 sp->cmd_clist = tc;
6290 (void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ);
6291 sgl = tc->chunks;
6292 sgl_addr = tc->addr;
6293 prior[PMCS_SGL_NCHUNKS-1].sglal =
6294 LE_32(DWORD0(sgl_addr));
6295 prior[PMCS_SGL_NCHUNKS-1].sglah =
6296 LE_32(DWORD1(sgl_addr));
6297 prior[PMCS_SGL_NCHUNKS-1].sglen = 0;
6298 prior[PMCS_SGL_NCHUNKS-1].flags =
6299 LE_32(PMCS_DMASGL_EXTENSION);
6300 prior = sgl;
6301 tsc = 0;
6302 }
6303 sgl[tsc].sglal = LE_32(DWORD0(sg->dmac_laddress));
6304 sgl[tsc].sglah = LE_32(DWORD1(sg->dmac_laddress));
6305 sgl[tsc].sglen = LE_32(sg->dmac_size);
6306 sgl[tsc++].flags = 0;
6307 sg++;
6308 }
6309 return (0);
6310 }
6311
6312 /*
6313 * Unload DMA
6314 */
6315 void
pmcs_dma_unload(pmcs_hw_t * pwp,pmcs_cmd_t * sp)6316 pmcs_dma_unload(pmcs_hw_t *pwp, pmcs_cmd_t *sp)
6317 {
6318 pmcs_dmachunk_t *cp;
6319
6320 mutex_enter(&pwp->dma_lock);
6321 while ((cp = sp->cmd_clist) != NULL) {
6322 sp->cmd_clist = cp->nxt;
6323 cp->nxt = pwp->dma_freelist;
6324 pwp->dma_freelist = cp;
6325 }
6326 mutex_exit(&pwp->dma_lock);
6327 }
6328
6329 /*
6330 * Take a chunk of consistent memory that has just been allocated and inserted
6331 * into the cip indices and prepare it for DMA chunk usage and add it to the
6332 * freelist.
6333 *
6334 * Called with dma_lock locked (except during attach when it's unnecessary)
6335 */
6336 void
pmcs_idma_chunks(pmcs_hw_t * pwp,pmcs_dmachunk_t * dcp,pmcs_chunk_t * pchunk,unsigned long lim)6337 pmcs_idma_chunks(pmcs_hw_t *pwp, pmcs_dmachunk_t *dcp,
6338 pmcs_chunk_t *pchunk, unsigned long lim)
6339 {
6340 unsigned long off, n;
6341 pmcs_dmachunk_t *np = dcp;
6342 pmcs_chunk_t *tmp_chunk;
6343
6344 if (pwp->dma_chunklist == NULL) {
6345 pwp->dma_chunklist = pchunk;
6346 } else {
6347 tmp_chunk = pwp->dma_chunklist;
6348 while (tmp_chunk->next) {
6349 tmp_chunk = tmp_chunk->next;
6350 }
6351 tmp_chunk->next = pchunk;
6352 }
6353
6354 /*
6355 * Install offsets into chunk lists.
6356 */
6357 for (n = 0, off = 0; off < lim; off += PMCS_SGL_CHUNKSZ, n++) {
6358 np->chunks = (void *)&pchunk->addrp[off];
6359 np->addr = pchunk->dma_addr + off;
6360 np->acc_handle = pchunk->acc_handle;
6361 np->dma_handle = pchunk->dma_handle;
6362 if ((off + PMCS_SGL_CHUNKSZ) < lim) {
6363 np = np->nxt;
6364 }
6365 }
6366 np->nxt = pwp->dma_freelist;
6367 pwp->dma_freelist = dcp;
6368 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
6369 "added %lu DMA chunks ", n);
6370 }
6371
6372 /*
6373 * Change the value of the interrupt coalescing timer. This is done currently
6374 * only for I/O completions. If we're using the "auto clear" feature, it can
6375 * be turned back on when interrupt coalescing is turned off and must be
6376 * turned off when the coalescing timer is on.
6377 * NOTE: PMCS_MSIX_GENERAL and PMCS_OQ_IODONE are the same value. As long
6378 * as that's true, we don't need to distinguish between them.
6379 */
6380
6381 void
pmcs_set_intr_coal_timer(pmcs_hw_t * pwp,pmcs_coal_timer_adj_t adj)6382 pmcs_set_intr_coal_timer(pmcs_hw_t *pwp, pmcs_coal_timer_adj_t adj)
6383 {
6384 if (adj == DECREASE_TIMER) {
6385 /* If the timer is already off, nothing to do. */
6386 if (pwp->io_intr_coal.timer_on == B_FALSE) {
6387 return;
6388 }
6389
6390 pwp->io_intr_coal.intr_coal_timer -= PMCS_COAL_TIMER_GRAN;
6391
6392 if (pwp->io_intr_coal.intr_coal_timer == 0) {
6393 /* Disable the timer */
6394 pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL, 0);
6395
6396 if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) {
6397 pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR,
6398 pwp->odb_auto_clear);
6399 }
6400
6401 pwp->io_intr_coal.timer_on = B_FALSE;
6402 pwp->io_intr_coal.max_io_completions = B_FALSE;
6403 pwp->io_intr_coal.num_intrs = 0;
6404 pwp->io_intr_coal.int_cleared = B_FALSE;
6405 pwp->io_intr_coal.num_io_completions = 0;
6406
6407 DTRACE_PROBE1(pmcs__intr__coalesce__timer__off,
6408 pmcs_io_intr_coal_t *, &pwp->io_intr_coal);
6409 } else {
6410 pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER,
6411 pwp->io_intr_coal.intr_coal_timer);
6412 }
6413 } else {
6414 /*
6415 * If the timer isn't on yet, do the setup for it now.
6416 */
6417 if (pwp->io_intr_coal.timer_on == B_FALSE) {
6418 /* If auto clear is being used, turn it off. */
6419 if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) {
6420 pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR,
6421 (pwp->odb_auto_clear &
6422 ~(1 << PMCS_MSIX_IODONE)));
6423 }
6424
6425 pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL,
6426 (1 << PMCS_MSIX_IODONE));
6427 pwp->io_intr_coal.timer_on = B_TRUE;
6428 pwp->io_intr_coal.intr_coal_timer =
6429 PMCS_COAL_TIMER_GRAN;
6430
6431 DTRACE_PROBE1(pmcs__intr__coalesce__timer__on,
6432 pmcs_io_intr_coal_t *, &pwp->io_intr_coal);
6433 } else {
6434 pwp->io_intr_coal.intr_coal_timer +=
6435 PMCS_COAL_TIMER_GRAN;
6436 }
6437
6438 if (pwp->io_intr_coal.intr_coal_timer > PMCS_MAX_COAL_TIMER) {
6439 pwp->io_intr_coal.intr_coal_timer = PMCS_MAX_COAL_TIMER;
6440 }
6441
6442 pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER,
6443 pwp->io_intr_coal.intr_coal_timer);
6444 }
6445
6446 /*
6447 * Adjust the interrupt threshold based on the current timer value
6448 */
6449 pwp->io_intr_coal.intr_threshold =
6450 PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 /
6451 (pwp->io_intr_coal.intr_latency +
6452 (pwp->io_intr_coal.intr_coal_timer * 1000)));
6453 }
6454
6455 /*
6456 * Register Access functions
6457 */
6458 uint32_t
pmcs_rd_iqci(pmcs_hw_t * pwp,uint32_t qnum)6459 pmcs_rd_iqci(pmcs_hw_t *pwp, uint32_t qnum)
6460 {
6461 uint32_t iqci;
6462
6463 if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
6464 DDI_SUCCESS) {
6465 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6466 "%s: ddi_dma_sync failed?", __func__);
6467 }
6468
6469 iqci = LE_32(
6470 ((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2]);
6471
6472 return (iqci);
6473 }
6474
6475 uint32_t
pmcs_rd_oqpi(pmcs_hw_t * pwp,uint32_t qnum)6476 pmcs_rd_oqpi(pmcs_hw_t *pwp, uint32_t qnum)
6477 {
6478 uint32_t oqpi;
6479
6480 if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
6481 DDI_SUCCESS) {
6482 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6483 "%s: ddi_dma_sync failed?", __func__);
6484 }
6485
6486 oqpi = LE_32(
6487 ((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2]);
6488
6489 return (oqpi);
6490 }
6491
/*
 * Read a GSM-space register.  GSM space is reached through a sliding AXI
 * translation window: the upper bits of the offset select the window base
 * (AXIL, and optionally AXIH when 'hi' is nonzero) and the remaining bits
 * index into the mapped region.  The window registers are shared state, so
 * the whole program/read/restore sequence runs under axil_lock and each
 * window write is verified by reading it back after a short settle delay.
 */
uint32_t
pmcs_rd_gsm_reg(pmcs_hw_t *pwp, uint8_t hi, uint32_t off)
{
	uint32_t rv, newaxil, oldaxil, oldaxih;

	/* Split offset into window base (AXIL) and in-window offset */
	newaxil = off & ~GSM_BASE_MASK;
	off &= GSM_BASE_MASK;
	mutex_enter(&pwp->axil_lock);
	oldaxil = ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]);
	ddi_put32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil);
	drv_usecwait(10);
	/* Read back to confirm the window actually moved */
	if (ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "AXIL register update failed");
	}
	if (hi) {
		/* Program the upper translation register as well */
		oldaxih = ddi_get32(pwp->top_acc_handle,
		    &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2]);
		ddi_put32(pwp->top_acc_handle,
		    &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2], hi);
		drv_usecwait(10);
		if (ddi_get32(pwp->top_acc_handle,
		    &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2]) != hi) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "AXIH register update failed");
		}
	}
	/* The actual register read through the window */
	rv = ddi_get32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2]);
	if (hi) {
		/* Restore AXIH before AXIL, mirroring the setup order */
		ddi_put32(pwp->top_acc_handle,
		    &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2], oldaxih);
		drv_usecwait(10);
		if (ddi_get32(pwp->top_acc_handle,
		    &pwp->top_regs[PMCS_AXI_TRANS_UPPER >> 2]) != oldaxih) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "AXIH register restore failed");
		}
	}
	ddi_put32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil);
	drv_usecwait(10);
	if (ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "AXIL register restore failed");
	}
	mutex_exit(&pwp->axil_lock);
	return (rv);
}
6544
/*
 * Write a GSM-space register through the AXI translation window.
 * Same window program/verify/restore protocol as pmcs_rd_gsm_reg(),
 * but only the lower (AXIL) translation register is manipulated.
 * Runs under axil_lock since the window is shared state.
 */
void
pmcs_wr_gsm_reg(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
	uint32_t newaxil, oldaxil;

	/* Split offset into window base (AXIL) and in-window offset */
	newaxil = off & ~GSM_BASE_MASK;
	off &= GSM_BASE_MASK;
	mutex_enter(&pwp->axil_lock);
	oldaxil = ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]);
	ddi_put32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil);
	drv_usecwait(10);
	/* Read back to confirm the window actually moved */
	if (ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "AXIL register update failed");
	}
	/* The actual register write through the window */
	ddi_put32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2], val);
	ddi_put32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil);
	drv_usecwait(10);
	if (ddi_get32(pwp->top_acc_handle,
	    &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "AXIL register restore failed");
	}
	mutex_exit(&pwp->axil_lock);
}
6574
6575 uint32_t
pmcs_rd_topunit(pmcs_hw_t * pwp,uint32_t off)6576 pmcs_rd_topunit(pmcs_hw_t *pwp, uint32_t off)
6577 {
6578 switch (off) {
6579 case PMCS_SPC_RESET:
6580 case PMCS_SPC_BOOT_STRAP:
6581 case PMCS_SPC_DEVICE_ID:
6582 case PMCS_DEVICE_REVISION:
6583 off = pmcs_rd_gsm_reg(pwp, 0, off);
6584 break;
6585 default:
6586 off = ddi_get32(pwp->top_acc_handle,
6587 &pwp->top_regs[off >> 2]);
6588 break;
6589 }
6590 return (off);
6591 }
6592
6593 void
pmcs_wr_topunit(pmcs_hw_t * pwp,uint32_t off,uint32_t val)6594 pmcs_wr_topunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
6595 {
6596 switch (off) {
6597 case PMCS_SPC_RESET:
6598 case PMCS_DEVICE_REVISION:
6599 pmcs_wr_gsm_reg(pwp, off, val);
6600 break;
6601 default:
6602 ddi_put32(pwp->top_acc_handle, &pwp->top_regs[off >> 2], val);
6603 break;
6604 }
6605 }
6606
/* Read a message-unit register */
uint32_t
pmcs_rd_msgunit(pmcs_hw_t *pwp, uint32_t off)
{
	return (ddi_get32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2]));
}

/* Read from the main MPI configuration table */
uint32_t
pmcs_rd_mpi_tbl(pmcs_hw_t *pwp, uint32_t off)
{
	return (ddi_get32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2]));
}

/* Read from the general status table */
uint32_t
pmcs_rd_gst_tbl(pmcs_hw_t *pwp, uint32_t off)
{
	return (ddi_get32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2]));
}

/* Read from the inbound queue configuration table */
uint32_t
pmcs_rd_iqc_tbl(pmcs_hw_t *pwp, uint32_t off)
{
	return (ddi_get32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2]));
}

/* Read from the outbound queue configuration table */
uint32_t
pmcs_rd_oqc_tbl(pmcs_hw_t *pwp, uint32_t off)
{
	return (ddi_get32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2]));
}

/* Read the producer index register for inbound queue qnum */
uint32_t
pmcs_rd_iqpi(pmcs_hw_t *pwp, uint32_t qnum)
{
	return (ddi_get32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2]));
}

/* Read the consumer index register for outbound queue qnum */
uint32_t
pmcs_rd_oqci(pmcs_hw_t *pwp, uint32_t qnum)
{
	return (ddi_get32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2]));
}

/* Write a message-unit register */
void
pmcs_wr_msgunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
	ddi_put32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2], val);
}

/* Write to the main MPI configuration table */
void
pmcs_wr_mpi_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
	ddi_put32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2], (val));
}

/* Write to the general status table */
void
pmcs_wr_gst_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
	ddi_put32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2], val);
}

/* Write to the inbound queue configuration table */
void
pmcs_wr_iqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
	ddi_put32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2], val);
}

/* Write to the outbound queue configuration table */
void
pmcs_wr_oqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
{
	ddi_put32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2], val);
}
6688
6689 void
pmcs_wr_iqci(pmcs_hw_t * pwp,uint32_t qnum,uint32_t val)6690 pmcs_wr_iqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
6691 {
6692 ((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2] = val;
6693 if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) !=
6694 DDI_SUCCESS) {
6695 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6696 "%s: ddi_dma_sync failed?", __func__);
6697 }
6698 }
6699
/* Write the producer index register for inbound queue qnum */
void
pmcs_wr_iqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
	ddi_put32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2], val);
}

/* Write the consumer index register for outbound queue qnum */
void
pmcs_wr_oqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
{
	ddi_put32(pwp->mpi_acc_handle,
	    &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2], val);
}
6713
6714 void
pmcs_wr_oqpi(pmcs_hw_t * pwp,uint32_t qnum,uint32_t val)6715 pmcs_wr_oqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
6716 {
6717 ((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2] = val;
6718 if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) !=
6719 DDI_SUCCESS) {
6720 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6721 "%s: ddi_dma_sync failed?", __func__);
6722 }
6723 }
6724
6725 /*
6726 * Check the status value of an outbound IOMB and report anything bad
6727 */
6728
void
pmcs_check_iomb_status(pmcs_hw_t *pwp, uint32_t *iomb)
{
	uint16_t opcode;
	int offset;

	if (iomb == NULL) {
		return;
	}

	/* Opcode lives in the low 12 bits of the first little-endian word */
	opcode = LE_32(iomb[0]) & 0xfff;

	/*
	 * The per-opcode 'offset' is the word index of the status field
	 * within the IOMB; it varies by response type.
	 */
	switch (opcode) {
	/*
	 * The following have no status field, so ignore them
	 */
	case PMCOUT_ECHO:
	case PMCOUT_SAS_HW_EVENT:
	case PMCOUT_GET_DEVICE_HANDLE:
	case PMCOUT_SATA_EVENT:
	case PMCOUT_SSP_EVENT:
	case PMCOUT_DEVICE_HANDLE_ARRIVED:
	case PMCOUT_GPIO:
	case PMCOUT_GPIO_EVENT:
	case PMCOUT_GET_TIME_STAMP:
	case PMCOUT_SKIP_ENTRIES:
	case PMCOUT_GET_NVMD_DATA:	/* Actually lower 16 bits of word 3 */
	case PMCOUT_SET_NVMD_DATA:	/* but ignore - we don't use these */
	case PMCOUT_DEVICE_HANDLE_REMOVED:
	case PMCOUT_SSP_REQUEST_RECEIVED:
		return;

	case PMCOUT_GENERAL_EVENT:
		offset = 1;
		break;

	case PMCOUT_SSP_COMPLETION:
	case PMCOUT_SMP_COMPLETION:
	case PMCOUT_DEVICE_REGISTRATION:
	case PMCOUT_DEREGISTER_DEVICE_HANDLE:
	case PMCOUT_SATA_COMPLETION:
	case PMCOUT_DEVICE_INFO:
	case PMCOUT_FW_FLASH_UPDATE:
	case PMCOUT_SSP_ABORT:
	case PMCOUT_SATA_ABORT:
	case PMCOUT_SAS_DIAG_MODE_START_END:
	case PMCOUT_SAS_HW_EVENT_ACK_ACK:
	case PMCOUT_SMP_ABORT:
	case PMCOUT_SET_DEVICE_STATE:
	case PMCOUT_GET_DEVICE_STATE:
	case PMCOUT_SET_DEVICE_INFO:
		offset = 2;
		break;

	case PMCOUT_LOCAL_PHY_CONTROL:
	case PMCOUT_SAS_DIAG_EXECUTE:
	case PMCOUT_PORT_CONTROL:
		offset = 3;
		break;

	case PMCOUT_GET_INFO:
	case PMCOUT_GET_VPD:
	case PMCOUT_SAS_ASSISTED_DISCOVERY_EVENT:
	case PMCOUT_SATA_ASSISTED_DISCOVERY_EVENT:
	case PMCOUT_SET_VPD:
	case PMCOUT_TWI:
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
		    "Got response for deprecated opcode", iomb);
		return;

	default:
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
		    "Got response for unknown opcode", iomb);
		return;
	}

	/* Dump the whole IOMB if the status word isn't OK */
	if (LE_32(iomb[offset]) != PMCOUT_STATUS_OK) {
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
		    "bad status on TAG_TYPE_NONE command", iomb);
	}
}
6810
6811 /*
6812 * Called with statlock held
6813 */
6814 void
pmcs_clear_xp(pmcs_hw_t * pwp,pmcs_xscsi_t * xp)6815 pmcs_clear_xp(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
6816 {
6817 _NOTE(ARGUNUSED(pwp));
6818
6819 ASSERT(mutex_owned(&xp->statlock));
6820
6821 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, "%s: Device 0x%p is gone.",
6822 __func__, (void *)xp);
6823
6824 xp->special_running = 0;
6825 xp->recovering = 0;
6826 xp->recover_wait = 0;
6827 xp->draining = 0;
6828 xp->new = 0;
6829 xp->assigned = 0;
6830 xp->dev_state = 0;
6831 xp->tagmap = 0;
6832 xp->dev_gone = 1;
6833 xp->event_recovery = 0;
6834 xp->dtype = NOTHING;
6835 xp->wq_recovery_tail = NULL;
6836 /* Don't clear xp->phy */
6837 /* Don't clear xp->actv_cnt */
6838 /* Don't clear xp->actv_pkts */
6839
6840 /*
6841 * Flush all target queues
6842 */
6843 pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES);
6844 }
6845
6846 static int
pmcs_smp_function_result(pmcs_hw_t * pwp,smp_response_frame_t * srf)6847 pmcs_smp_function_result(pmcs_hw_t *pwp, smp_response_frame_t *srf)
6848 {
6849 int result = srf->srf_result;
6850
6851 switch (result) {
6852 case SMP_RES_UNKNOWN_FUNCTION:
6853 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6854 "%s: SMP DISCOVER Response "
6855 "Function Result: Unknown SMP Function(0x%x)",
6856 __func__, result);
6857 break;
6858 case SMP_RES_FUNCTION_FAILED:
6859 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6860 "%s: SMP DISCOVER Response "
6861 "Function Result: SMP Function Failed(0x%x)",
6862 __func__, result);
6863 break;
6864 case SMP_RES_INVALID_REQUEST_FRAME_LENGTH:
6865 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6866 "%s: SMP DISCOVER Response "
6867 "Function Result: Invalid Request Frame Length(0x%x)",
6868 __func__, result);
6869 break;
6870 case SMP_RES_INCOMPLETE_DESCRIPTOR_LIST:
6871 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6872 "%s: SMP DISCOVER Response "
6873 "Function Result: Incomplete Descriptor List(0x%x)",
6874 __func__, result);
6875 break;
6876 case SMP_RES_PHY_DOES_NOT_EXIST:
6877 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6878 "%s: SMP DISCOVER Response "
6879 "Function Result: PHY does not exist(0x%x)",
6880 __func__, result);
6881 break;
6882 case SMP_RES_PHY_VACANT:
6883 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6884 "%s: SMP DISCOVER Response "
6885 "Function Result: PHY Vacant(0x%x)",
6886 __func__, result);
6887 break;
6888 default:
6889 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6890 "%s: SMP DISCOVER Response "
6891 "Function Result: (0x%x)",
6892 __func__, result);
6893 break;
6894 }
6895
6896 return (result);
6897 }
6898
6899 /*
6900 * Do all the repetitive stuff necessary to setup for DMA
6901 *
6902 * pwp: Used for dip
6903 * dma_attr: ddi_dma_attr_t to use for the mapping
6904 * acch: ddi_acc_handle_t to use for the mapping
6905 * dmah: ddi_dma_handle_t to use
6906 * length: Amount of memory for mapping
6907 * kvap: Pointer filled in with kernel virtual address on successful return
6908 * dma_addr: Pointer filled in with DMA address on successful return
6909 */
6910 boolean_t
pmcs_dma_setup(pmcs_hw_t * pwp,ddi_dma_attr_t * dma_attr,ddi_acc_handle_t * acch,ddi_dma_handle_t * dmah,size_t length,caddr_t * kvap,uint64_t * dma_addr)6911 pmcs_dma_setup(pmcs_hw_t *pwp, ddi_dma_attr_t *dma_attr, ddi_acc_handle_t *acch,
6912 ddi_dma_handle_t *dmah, size_t length, caddr_t *kvap, uint64_t *dma_addr)
6913 {
6914 dev_info_t *dip = pwp->dip;
6915 ddi_dma_cookie_t cookie;
6916 size_t real_length;
6917 uint_t ddma_flag = DDI_DMA_CONSISTENT;
6918 uint_t ddabh_flag = DDI_DMA_CONSISTENT | DDI_DMA_RDWR;
6919 uint_t cookie_cnt;
6920 ddi_device_acc_attr_t mattr = {
6921 DDI_DEVICE_ATTR_V0,
6922 DDI_NEVERSWAP_ACC,
6923 DDI_STRICTORDER_ACC,
6924 DDI_DEFAULT_ACC
6925 };
6926
6927 *acch = NULL;
6928 *dmah = NULL;
6929
6930 if (ddi_dma_alloc_handle(dip, dma_attr, DDI_DMA_SLEEP, NULL, dmah) !=
6931 DDI_SUCCESS) {
6932 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6933 "Failed to allocate DMA handle");
6934 return (B_FALSE);
6935 }
6936
6937 if (ddi_dma_mem_alloc(*dmah, length, &mattr, ddma_flag, DDI_DMA_SLEEP,
6938 NULL, kvap, &real_length, acch) != DDI_SUCCESS) {
6939 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
6940 "Failed to allocate DMA mem");
6941 ddi_dma_free_handle(dmah);
6942 *dmah = NULL;
6943 return (B_FALSE);
6944 }
6945
6946 if (ddi_dma_addr_bind_handle(*dmah, NULL, *kvap, real_length,
6947 ddabh_flag, DDI_DMA_SLEEP, NULL, &cookie, &cookie_cnt)
6948 != DDI_DMA_MAPPED) {
6949 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Failed to bind DMA");
6950 ddi_dma_free_handle(dmah);
6951 ddi_dma_mem_free(acch);
6952 *dmah = NULL;
6953 *acch = NULL;
6954 return (B_FALSE);
6955 }
6956
6957 if (cookie_cnt != 1) {
6958 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Multiple cookies");
6959 if (ddi_dma_unbind_handle(*dmah) != DDI_SUCCESS) {
6960 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Condition "
6961 "failed at %s():%d", __func__, __LINE__);
6962 }
6963 ddi_dma_free_handle(dmah);
6964 ddi_dma_mem_free(acch);
6965 *dmah = NULL;
6966 *acch = NULL;
6967 return (B_FALSE);
6968 }
6969
6970 *dma_addr = cookie.dmac_laddress;
6971
6972 return (B_TRUE);
6973 }
6974
6975 /*
6976 * Flush requested queues for a particular target. Called with statlock held
6977 */
void
pmcs_flush_target_queues(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt, uint8_t queues)
{
	pmcs_cmd_t *sp, *sp_next;
	pmcwork_t *pwrk;

	ASSERT(pwp != NULL);
	ASSERT(tgt != NULL);

	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, tgt,
	    "%s: Flushing queues (%d) for target 0x%p", __func__,
	    queues, (void *)tgt);

	/*
	 * Commands on the wait queue (or the special queue below) don't have
	 * work structures associated with them.
	 */
	if (queues & PMCS_TGT_WAIT_QUEUE) {
		mutex_enter(&tgt->wqlock);
		while ((sp = STAILQ_FIRST(&tgt->wq)) != NULL) {
			STAILQ_REMOVE(&tgt->wq, sp, pmcs_cmd, cmd_next);
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, tgt,
			    "%s: Removing cmd 0x%p from wq for target 0x%p",
			    __func__, (void *)sp, (void *)tgt);
			/* Fail the command back as CMD_DEV_GONE */
			CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
			CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
			/*
			 * wqlock must be dropped while completing; note the
			 * queue is re-scanned from the head each iteration,
			 * so dropping the lock here is safe.
			 */
			mutex_exit(&tgt->wqlock);
			pmcs_dma_unload(pwp, sp);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
			mutex_enter(&tgt->wqlock);
		}
		mutex_exit(&tgt->wqlock);
	}

	/*
	 * Commands on the active queue will have work structures associated
	 * with them.
	 */
	if (queues & PMCS_TGT_ACTIVE_QUEUE) {
		/* statlock must not be held across the aqlock/work locks */
		mutex_exit(&tgt->statlock);
		mutex_enter(&tgt->aqlock);
		sp = STAILQ_FIRST(&tgt->aq);
		while (sp) {
			/* Capture the successor before sp can be completed */
			sp_next = STAILQ_NEXT(sp, cmd_next);
			pwrk = pmcs_tag2wp(pwp, sp->cmd_tag, B_FALSE);

			/*
			 * If we don't find a work structure, it's because
			 * the command is already complete. If so, move on
			 * to the next one.
			 */
			if (pwrk == NULL) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
				    "%s: Not removing cmd 0x%p (htag 0x%x) "
				    "from aq", __func__, (void *)sp,
				    sp->cmd_tag);
				sp = sp_next;
				continue;
			}

			STAILQ_REMOVE(&tgt->aq, sp, pmcs_cmd, cmd_next);
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
			    "%s: Removing cmd 0x%p (htag 0x%x) from aq for "
			    "target 0x%p", __func__, (void *)sp, sp->cmd_tag,
			    (void *)tgt);
			mutex_exit(&tgt->aqlock);

			/*
			 * Mark the work structure as dead and complete it
			 */
			pwrk->dead = 1;
			CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
			CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
			pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
			pmcs_dma_unload(pwp, sp);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			mutex_exit(&pwp->cq_lock);
			mutex_enter(&tgt->aqlock);
			sp = sp_next;
		}
		mutex_exit(&tgt->aqlock);
		mutex_enter(&tgt->statlock);
	}

	if (queues & PMCS_TGT_SPECIAL_QUEUE) {
		while ((sp = STAILQ_FIRST(&tgt->sq)) != NULL) {
			STAILQ_REMOVE(&tgt->sq, sp, pmcs_cmd, cmd_next);
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, tgt->phy, tgt,
			    "%s: Removing cmd 0x%p from sq for target 0x%p",
			    __func__, (void *)sp, (void *)tgt);
			CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
			CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
			pmcs_dma_unload(pwp, sp);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			mutex_exit(&pwp->cq_lock);
		}
	}

	/* A full flush also reaps off-queue (non-IO) work structures */
	if (queues == PMCS_TGT_ALL_QUEUES) {
		mutex_exit(&tgt->statlock);
		pmcs_flush_nonio_cmds(pwp, tgt);
		mutex_enter(&tgt->statlock);
	}
}
7087
7088 /*
7089 * Flush non-IO commands for this target. This cleans up the off-queue
7090 * work with no pmcs_cmd_t associated.
7091 */
static void
pmcs_flush_nonio_cmds(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt)
{
	int i;
	pmcwork_t *p;

	/* Scan the entire work-structure array for entries owned by tgt */
	for (i = 0; i < pwp->max_cmd; i++) {
		p = &pwp->work[i];
		mutex_enter(&p->lock);
		if (p->xp != tgt) {
			mutex_exit(&p->lock);
			continue;
		}
		if (p->htag & PMCS_TAG_NONIO_CMD) {
			/* Skip entries that are idle or already finished */
			if (!PMCS_COMMAND_ACTIVE(p) || PMCS_COMMAND_DONE(p)) {
				mutex_exit(&p->lock);
				continue;
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, p->phy, p->xp,
			    "%s: Completing non-io cmd with HTAG 0x%x",
			    __func__, p->htag);
			/*
			 * NOTE(review): p->lock is intentionally NOT
			 * released here — pmcs_complete_work_impl() (or the
			 * completion path it invokes) is responsible for
			 * dropping it.
			 */
			pmcs_complete_work_impl(pwp, p, NULL, 0);
		} else {
			mutex_exit(&p->lock);
		}
	}
}
7119
7120 void
pmcs_complete_work_impl(pmcs_hw_t * pwp,pmcwork_t * pwrk,uint32_t * iomb,size_t amt)7121 pmcs_complete_work_impl(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *iomb,
7122 size_t amt)
7123 {
7124 pmcs_phy_t *pptr = NULL;
7125
7126 switch (PMCS_TAG_TYPE(pwrk->htag)) {
7127 case PMCS_TAG_TYPE_CBACK:
7128 {
7129 pmcs_cb_t callback = (pmcs_cb_t)pwrk->ptr;
7130 (*callback)(pwp, pwrk, iomb);
7131 break;
7132 }
7133 case PMCS_TAG_TYPE_WAIT:
7134 if (pwrk->arg && iomb && amt) {
7135 (void) memcpy(pwrk->arg, iomb, amt);
7136 }
7137 cv_signal(&pwrk->sleep_cv);
7138 mutex_exit(&pwrk->lock);
7139 break;
7140 case PMCS_TAG_TYPE_NONE:
7141 #ifdef DEBUG
7142 pmcs_check_iomb_status(pwp, iomb);
7143 #endif
7144 pptr = pwrk->phy;
7145 pmcs_pwork(pwp, pwrk);
7146
7147 /* If this was an abort all, clean up if needed */
7148 if ((pwrk->abt_htag == PMCS_ABT_HTAG_ALL) && (pptr != NULL)) {
7149 mutex_enter(&pptr->phy_lock);
7150 if (pptr->abort_all_start) {
7151 pptr->abort_all_start = 0;
7152 cv_signal(&pptr->abort_all_cv);
7153 }
7154 mutex_exit(&pptr->phy_lock);
7155 }
7156 break;
7157 default:
7158 /*
7159 * We will leak a structure here if we don't know
7160 * what happened
7161 */
7162 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
7163 "%s: Unknown PMCS_TAG_TYPE (%x)",
7164 __func__, PMCS_TAG_TYPE(pwrk->htag));
7165 break;
7166 }
7167 }
7168
7169 /*
7170 * Determine if iport still has targets. During detach(9E), if SCSA is
 * successful in its guarantee of tran_tgt_free(9E) before detach(9E),
7172 * this should always return B_FALSE.
7173 */
7174 boolean_t
pmcs_iport_has_targets(pmcs_hw_t * pwp,pmcs_iport_t * iport)7175 pmcs_iport_has_targets(pmcs_hw_t *pwp, pmcs_iport_t *iport)
7176 {
7177 pmcs_xscsi_t *xp;
7178 int i;
7179
7180 mutex_enter(&pwp->lock);
7181
7182 if (!pwp->targets || !pwp->max_dev) {
7183 mutex_exit(&pwp->lock);
7184 return (B_FALSE);
7185 }
7186
7187 for (i = 0; i < pwp->max_dev; i++) {
7188 xp = pwp->targets[i];
7189 if ((xp == NULL) || (xp->phy == NULL) ||
7190 (xp->phy->iport != iport)) {
7191 continue;
7192 }
7193
7194 mutex_exit(&pwp->lock);
7195 return (B_TRUE);
7196 }
7197
7198 mutex_exit(&pwp->lock);
7199 return (B_FALSE);
7200 }
7201
7202 /*
7203 * Called with softstate lock held
7204 */
void
pmcs_destroy_target(pmcs_xscsi_t *target)
{
	pmcs_hw_t *pwp = target->pwp;
	pmcs_iport_t *iport;

	ASSERT(pwp);
	ASSERT(mutex_owned(&pwp->lock));

	/* A NULL unit address is unexpected, but we proceed anyway */
	if (!target->ua) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, target,
		    "%s: target %p iport address is null",
		    __func__, (void *)target);
	}

	/* This takes a hold on the iport; released below */
	iport = pmcs_get_iport_by_ua(pwp, target->ua);
	if (iport == NULL) {
		/*
		 * Without an iport we cannot free the soft-state entry,
		 * so bail out here (the target's locks/cvs are left
		 * intact in this case).
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, target,
		    "%s: no iport associated with tgt(0x%p)",
		    __func__, (void *)target);
		return;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, target,
	    "%s: free target %p", __func__, (void *)target);
	if (target->ua) {
		strfree(target->ua);
	}

	/* Tear down per-target synchronization and soft state */
	mutex_destroy(&target->wqlock);
	mutex_destroy(&target->aqlock);
	mutex_destroy(&target->statlock);
	cv_destroy(&target->reset_cv);
	cv_destroy(&target->abort_cv);
	ddi_soft_state_bystr_fini(&target->lun_sstate);
	/* This frees the target structure itself */
	ddi_soft_state_bystr_free(iport->tgt_sstate, target->unit_address);
	pmcs_rele_iport(iport);
}
7243
7244 /*
7245 * pmcs_lock_phy_impl
7246 *
7247 * This function is what does the actual work for pmcs_lock_phy. It will
7248 * lock all PHYs from phyp down in a top-down fashion.
7249 *
7250 * Locking notes:
7251 * 1. level starts from 0 for the PHY ("parent") that's passed in. It is
7252 * not a reflection of the actual level of the PHY in the SAS topology.
7253 * 2. If parent is an expander, then parent is locked along with all its
7254 * descendents.
7255 * 3. Expander subsidiary PHYs at level 0 are not locked. It is the
7256 * responsibility of the caller to individually lock expander subsidiary PHYs
7257 * at level 0 if necessary.
7258 * 4. Siblings at level 0 are not traversed due to the possibility that we're
7259 * locking a PHY on the dead list. The siblings could be pointing to invalid
7260 * PHYs. We don't lock siblings at level 0 anyway.
7261 */
static void
pmcs_lock_phy_impl(pmcs_phy_t *phyp, int level)
{
	pmcs_phy_t *tphyp;

	ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
	    (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));

	/*
	 * Start walking the PHYs.
	 */
	tphyp = phyp;
	while (tphyp) {
		/*
		 * If we're at the top level, only lock ourselves. For anything
		 * at level > 0, traverse children while locking everything.
		 */
		if ((level > 0) || (tphyp == phyp)) {
			pmcs_prt(tphyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, tphyp,
			    NULL, "%s: PHY 0x%p parent 0x%p path %s lvl %d",
			    __func__, (void *)tphyp, (void *)tphyp->parent,
			    tphyp->path, level);
			/*
			 * Parent is always locked before its children
			 * (top-down), which defines the lock ordering.
			 */
			mutex_enter(&tphyp->phy_lock);

			if (tphyp->children) {
				pmcs_lock_phy_impl(tphyp->children, level + 1);
			}
		}

		/* Siblings at level 0 are never traversed (see note above) */
		if (level == 0) {
			return;
		}

		tphyp = tphyp->sibling;
	}
}
7298
7299 /*
7300 * pmcs_lock_phy
7301 *
7302 * This function is responsible for locking a PHY and all its descendents
7303 */
7304 void
pmcs_lock_phy(pmcs_phy_t * phyp)7305 pmcs_lock_phy(pmcs_phy_t *phyp)
7306 {
7307 #ifdef DEBUG
7308 char *callername = NULL;
7309 ulong_t off;
7310
7311 ASSERT(phyp != NULL);
7312
7313 callername = modgetsymname((uintptr_t)caller(), &off);
7314
7315 if (callername == NULL) {
7316 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
7317 "%s: PHY 0x%p path %s caller: unknown", __func__,
7318 (void *)phyp, phyp->path);
7319 } else {
7320 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
7321 "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
7322 (void *)phyp, phyp->path, callername, off);
7323 }
7324 #else
7325 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
7326 "%s: PHY 0x%p path %s", __func__, (void *)phyp, phyp->path);
7327 #endif
7328 pmcs_lock_phy_impl(phyp, 0);
7329 }
7330
7331 /*
7332 * pmcs_unlock_phy_impl
7333 *
7334 * Unlock all PHYs from phyp down in a bottom-up fashion.
7335 */
static void
pmcs_unlock_phy_impl(pmcs_phy_t *phyp, int level)
{
	pmcs_phy_t *phy_next;

	ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
	    (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));

	/*
	 * Recurse down to the bottom PHYs
	 * (children are unlocked before their parents — the reverse of
	 * the top-down order used by pmcs_lock_phy_impl()).
	 */
	if (level == 0) {
		if (phyp->children) {
			pmcs_unlock_phy_impl(phyp->children, level + 1);
		}
	} else {
		phy_next = phyp;
		while (phy_next) {
			if (phy_next->children) {
				pmcs_unlock_phy_impl(phy_next->children,
				    level + 1);
			}
			phy_next = phy_next->sibling;
		}
	}

	/*
	 * Iterate through PHYs unlocking all at level > 0 as well the top PHY
	 */
	phy_next = phyp;
	while (phy_next) {
		if ((level > 0) || (phy_next == phyp)) {
			pmcs_prt(phy_next->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
			    phy_next, NULL,
			    "%s: PHY 0x%p parent 0x%p path %s lvl %d",
			    __func__, (void *)phy_next,
			    (void *)phy_next->parent, phy_next->path, level);
			mutex_exit(&phy_next->phy_lock);
		}

		/* Siblings at level 0 are never traversed */
		if (level == 0) {
			return;
		}

		phy_next = phy_next->sibling;
	}
}
7383
7384 /*
7385 * pmcs_unlock_phy
7386 *
7387 * Unlock a PHY and all its descendents
7388 */
7389 void
pmcs_unlock_phy(pmcs_phy_t * phyp)7390 pmcs_unlock_phy(pmcs_phy_t *phyp)
7391 {
7392 #ifdef DEBUG
7393 char *callername = NULL;
7394 ulong_t off;
7395
7396 ASSERT(phyp != NULL);
7397
7398 callername = modgetsymname((uintptr_t)caller(), &off);
7399
7400 if (callername == NULL) {
7401 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
7402 "%s: PHY 0x%p path %s caller: unknown", __func__,
7403 (void *)phyp, phyp->path);
7404 } else {
7405 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
7406 "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
7407 (void *)phyp, phyp->path, callername, off);
7408 }
7409 #else
7410 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING, phyp, NULL,
7411 "%s: PHY 0x%p path %s", __func__, (void *)phyp, phyp->path);
7412 #endif
7413 pmcs_unlock_phy_impl(phyp, 0);
7414 }
7415
7416 /*
7417 * pmcs_get_root_phy
7418 *
7419 * For a given phy pointer return its root phy.
7420 * This function must only be called during discovery in order to ensure that
7421 * the chain of PHYs from phyp up to the root PHY doesn't change.
7422 */
7423 pmcs_phy_t *
pmcs_get_root_phy(pmcs_phy_t * phyp)7424 pmcs_get_root_phy(pmcs_phy_t *phyp)
7425 {
7426 ASSERT(phyp);
7427
7428 while (phyp) {
7429 if (IS_ROOT_PHY(phyp)) {
7430 break;
7431 }
7432 phyp = phyp->parent;
7433 }
7434
7435 return (phyp);
7436 }
7437
7438 /*
7439 * pmcs_free_dma_chunklist
7440 *
7441 * Free DMA S/G chunk list
7442 */
7443 void
pmcs_free_dma_chunklist(pmcs_hw_t * pwp)7444 pmcs_free_dma_chunklist(pmcs_hw_t *pwp)
7445 {
7446 pmcs_chunk_t *pchunk;
7447
7448 while (pwp->dma_chunklist) {
7449 pchunk = pwp->dma_chunklist;
7450 pwp->dma_chunklist = pwp->dma_chunklist->next;
7451 if (pchunk->dma_handle) {
7452 if (ddi_dma_unbind_handle(pchunk->dma_handle) !=
7453 DDI_SUCCESS) {
7454 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
7455 "Condition failed at %s():%d",
7456 __func__, __LINE__);
7457 }
7458 ddi_dma_free_handle(&pchunk->dma_handle);
7459 ddi_dma_mem_free(&pchunk->acc_handle);
7460 }
7461 kmem_free(pchunk, sizeof (pmcs_chunk_t));
7462 }
7463 }
7464
7465 /*ARGSUSED2*/
7466 int
pmcs_phy_constructor(void * buf,void * arg,int kmflags)7467 pmcs_phy_constructor(void *buf, void *arg, int kmflags)
7468 {
7469 pmcs_hw_t *pwp = (pmcs_hw_t *)arg;
7470 pmcs_phy_t *phyp = (pmcs_phy_t *)buf;
7471
7472 mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER,
7473 DDI_INTR_PRI(pwp->intr_pri));
7474 cv_init(&phyp->abort_all_cv, NULL, CV_DRIVER, NULL);
7475 return (0);
7476 }
7477
7478 /*ARGSUSED1*/
7479 void
pmcs_phy_destructor(void * buf,void * arg)7480 pmcs_phy_destructor(void *buf, void *arg)
7481 {
7482 pmcs_phy_t *phyp = (pmcs_phy_t *)buf;
7483
7484 cv_destroy(&phyp->abort_all_cv);
7485 mutex_destroy(&phyp->phy_lock);
7486 }
7487
7488 /*
7489 * Free all PHYs from the kmem_cache starting at phyp as well as everything
7490 * on the dead_phys list.
7491 *
7492 * NOTE: This function does not free root PHYs as they are not allocated
7493 * from the kmem_cache.
7494 *
7495 * No PHY locks are acquired as this should only be called during DDI_DETACH
7496 * or soft reset (while pmcs interrupts are disabled).
7497 */
7498 void
pmcs_free_all_phys(pmcs_hw_t * pwp,pmcs_phy_t * phyp)7499 pmcs_free_all_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
7500 {
7501 pmcs_phy_t *tphyp, *nphyp, *cphyp;
7502
7503 if (phyp == NULL) {
7504 return;
7505 }
7506
7507 for (tphyp = phyp; tphyp; tphyp = nphyp) {
7508 nphyp = tphyp->sibling;
7509 cphyp = tphyp->children;
7510
7511 if (cphyp) {
7512 tphyp->children = NULL;
7513 pmcs_free_all_phys(pwp, cphyp);
7514 }
7515
7516 if (!IS_ROOT_PHY(tphyp)) {
7517 tphyp->target_addr = NULL;
7518 kmem_cache_free(pwp->phy_cache, tphyp);
7519 }
7520 }
7521
7522 mutex_enter(&pwp->dead_phylist_lock);
7523 for (tphyp = pwp->dead_phys; tphyp; tphyp = nphyp) {
7524 nphyp = tphyp->dead_next;
7525 tphyp->target_addr = NULL;
7526 kmem_cache_free(pwp->phy_cache, tphyp);
7527 }
7528 pwp->dead_phys = NULL;
7529 mutex_exit(&pwp->dead_phylist_lock);
7530 }
7531
7532 /*
7533 * Free a list of PHYs linked together by the sibling pointer back to the
7534 * kmem cache from whence they came. This function does not recurse, so the
7535 * caller must ensure there are no children.
7536 */
7537 void
pmcs_free_phys(pmcs_hw_t * pwp,pmcs_phy_t * phyp)7538 pmcs_free_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
7539 {
7540 pmcs_phy_t *next_phy;
7541
7542 while (phyp) {
7543 next_phy = phyp->sibling;
7544 ASSERT(!mutex_owned(&phyp->phy_lock));
7545 phyp->target_addr = NULL;
7546 kmem_cache_free(pwp->phy_cache, phyp);
7547 phyp = next_phy;
7548 }
7549 }
7550
7551 /*
7552 * Make a copy of an existing PHY structure. This is used primarily in
7553 * discovery to compare the contents of an existing PHY with what gets
7554 * reported back by an expander.
7555 *
7556 * This function must not be called from any context where sleeping is
7557 * not possible.
7558 *
7559 * The new PHY is returned unlocked.
7560 */
7561 static pmcs_phy_t *
pmcs_clone_phy(pmcs_phy_t * orig_phy)7562 pmcs_clone_phy(pmcs_phy_t *orig_phy)
7563 {
7564 pmcs_phy_t *local;
7565
7566 local = kmem_cache_alloc(orig_phy->pwp->phy_cache, KM_SLEEP);
7567
7568 /*
7569 * Go ahead and just copy everything...
7570 */
7571 *local = *orig_phy;
7572 local->target_addr = &orig_phy->target;
7573
7574 /*
7575 * But the following must be set appropriately for this copy
7576 */
7577 local->sibling = NULL;
7578 local->children = NULL;
7579 local->target = NULL;
7580 mutex_init(&local->phy_lock, NULL, MUTEX_DRIVER,
7581 DDI_INTR_PRI(orig_phy->pwp->intr_pri));
7582
7583 return (local);
7584 }
7585
7586 int
pmcs_check_acc_handle(ddi_acc_handle_t handle)7587 pmcs_check_acc_handle(ddi_acc_handle_t handle)
7588 {
7589 ddi_fm_error_t de;
7590
7591 if (handle == NULL) {
7592 return (DDI_FAILURE);
7593 }
7594 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
7595 return (de.fme_status);
7596 }
7597
7598 int
pmcs_check_dma_handle(ddi_dma_handle_t handle)7599 pmcs_check_dma_handle(ddi_dma_handle_t handle)
7600 {
7601 ddi_fm_error_t de;
7602
7603 if (handle == NULL) {
7604 return (DDI_FAILURE);
7605 }
7606 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
7607 return (de.fme_status);
7608 }
7609
7610
7611 void
pmcs_fm_ereport(pmcs_hw_t * pwp,char * detail)7612 pmcs_fm_ereport(pmcs_hw_t *pwp, char *detail)
7613 {
7614 uint64_t ena;
7615 char buf[FM_MAX_CLASS];
7616
7617 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7618 ena = fm_ena_generate(0, FM_ENA_FMT1);
7619 if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities)) {
7620 ddi_fm_ereport_post(pwp->dip, buf, ena, DDI_NOSLEEP,
7621 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
7622 }
7623 }
7624
7625 int
pmcs_check_acc_dma_handle(pmcs_hw_t * pwp)7626 pmcs_check_acc_dma_handle(pmcs_hw_t *pwp)
7627 {
7628 pmcs_chunk_t *pchunk;
7629 int i;
7630
7631 /* check all acc & dma handles allocated in attach */
7632 if ((pmcs_check_acc_handle(pwp->pci_acc_handle) != DDI_SUCCESS) ||
7633 (pmcs_check_acc_handle(pwp->msg_acc_handle) != DDI_SUCCESS) ||
7634 (pmcs_check_acc_handle(pwp->top_acc_handle) != DDI_SUCCESS) ||
7635 (pmcs_check_acc_handle(pwp->mpi_acc_handle) != DDI_SUCCESS) ||
7636 (pmcs_check_acc_handle(pwp->gsm_acc_handle) != DDI_SUCCESS)) {
7637 goto check_failed;
7638 }
7639
7640 for (i = 0; i < PMCS_NIQ; i++) {
7641 if ((pmcs_check_dma_handle(
7642 pwp->iqp_handles[i]) != DDI_SUCCESS) ||
7643 (pmcs_check_acc_handle(
7644 pwp->iqp_acchdls[i]) != DDI_SUCCESS)) {
7645 goto check_failed;
7646 }
7647 }
7648
7649 for (i = 0; i < PMCS_NOQ; i++) {
7650 if ((pmcs_check_dma_handle(
7651 pwp->oqp_handles[i]) != DDI_SUCCESS) ||
7652 (pmcs_check_acc_handle(
7653 pwp->oqp_acchdls[i]) != DDI_SUCCESS)) {
7654 goto check_failed;
7655 }
7656 }
7657
7658 if ((pmcs_check_dma_handle(pwp->cip_handles) != DDI_SUCCESS) ||
7659 (pmcs_check_acc_handle(pwp->cip_acchdls) != DDI_SUCCESS)) {
7660 goto check_failed;
7661 }
7662
7663 if (pwp->fwlog &&
7664 ((pmcs_check_dma_handle(pwp->fwlog_hndl) != DDI_SUCCESS) ||
7665 (pmcs_check_acc_handle(pwp->fwlog_acchdl) != DDI_SUCCESS))) {
7666 goto check_failed;
7667 }
7668
7669 if (pwp->regdump_hndl && pwp->regdump_acchdl &&
7670 ((pmcs_check_dma_handle(pwp->regdump_hndl) != DDI_SUCCESS) ||
7671 (pmcs_check_acc_handle(pwp->regdump_acchdl)
7672 != DDI_SUCCESS))) {
7673 goto check_failed;
7674 }
7675
7676
7677 pchunk = pwp->dma_chunklist;
7678 while (pchunk) {
7679 if ((pmcs_check_acc_handle(pchunk->acc_handle)
7680 != DDI_SUCCESS) ||
7681 (pmcs_check_dma_handle(pchunk->dma_handle)
7682 != DDI_SUCCESS)) {
7683 goto check_failed;
7684 }
7685 pchunk = pchunk->next;
7686 }
7687
7688 return (0);
7689
7690 check_failed:
7691
7692 return (1);
7693 }
7694
/*
 * pmcs_handle_dead_phys
 *
 * If the PHY has no outstanding work associated with it, remove it from
 * the dead PHY list and free it.
 *
 * If pwp->ds_err_recovering or pwp->configuring is set, don't run.
 * This keeps routines that need to submit work to the chip from having to
 * hold PHY locks to ensure that PHYs don't disappear while they do their work.
 */
void
pmcs_handle_dead_phys(pmcs_hw_t *pwp)
{
	pmcs_phy_t *phyp, *nphyp, *pphyp;

	mutex_enter(&pwp->lock);
	mutex_enter(&pwp->config_lock);

	/*
	 * Bitwise OR of the two flags; either being set means we must
	 * not reap right now.
	 */
	if (pwp->configuring | pwp->ds_err_recovering) {
		mutex_exit(&pwp->config_lock);
		mutex_exit(&pwp->lock);
		return;
	}

	/*
	 * Check every PHY in the dead PHY list
	 */
	mutex_enter(&pwp->dead_phylist_lock);
	phyp = pwp->dead_phys;
	pphyp = NULL; /* Set previous PHY to NULL */

	while (phyp != NULL) {
		pmcs_lock_phy(phyp);
		ASSERT(phyp->dead);

		/* Capture the successor before we potentially free phyp */
		nphyp = phyp->dead_next;

		/*
		 * Check for outstanding work
		 */
		if (phyp->ref_count > 0) {
			/* Still referenced; leave it on the list */
			pmcs_unlock_phy(phyp);
			pphyp = phyp; /* This PHY becomes "previous" */
		} else if (phyp->target) {
			/* Target still attached; leave it on the list */
			pmcs_unlock_phy(phyp);
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, phyp, phyp->target,
			    "%s: Not freeing PHY 0x%p: target 0x%p is not free",
			    __func__, (void *)phyp, (void *)phyp->target);
			pphyp = phyp;
		} else {
			/*
			 * No outstanding work or target references. Remove it
			 * from the list and free it
			 */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
			    "%s: Freeing inactive dead PHY 0x%p @ %s "
			    "target = 0x%p", __func__, (void *)phyp,
			    phyp->path, (void *)phyp->target);
			/*
			 * If pphyp is NULL, then phyp was the head of the list,
			 * so just reset the head to nphyp. Otherwise, the
			 * previous PHY will now point to nphyp (the next PHY)
			 */
			if (pphyp == NULL) {
				pwp->dead_phys = nphyp;
			} else {
				pphyp->dead_next = nphyp;
			}
			/*
			 * If the target still points to this PHY, remove
			 * that linkage now.
			 *
			 * NOTE(review): this branch is only reached when
			 * phyp->target == NULL (see the else-if above), so
			 * this check appears redundant — confirm before
			 * removing.
			 */
			if (phyp->target) {
				mutex_enter(&phyp->target->statlock);
				if (phyp->target->phy == phyp) {
					phyp->target->phy = NULL;
				}
				mutex_exit(&phyp->target->statlock);
			}
			pmcs_unlock_phy(phyp);
			phyp->target_addr = NULL;
			kmem_cache_free(pwp->phy_cache, phyp);
		}

		phyp = nphyp;
	}

	mutex_exit(&pwp->dead_phylist_lock);
	mutex_exit(&pwp->config_lock);
	mutex_exit(&pwp->lock);
}
7786
/*
 * Atomically bump the PHY's reference count (tracks outstanding work
 * against the PHY).  No PHY lock is required.
 */
void
pmcs_inc_phy_ref_count(pmcs_phy_t *phyp)
{
	atomic_inc_32(&phyp->ref_count);
}
7792
/*
 * Atomically drop the PHY's reference count.  Asserts (DEBUG only) that
 * the count cannot underflow.
 */
void
pmcs_dec_phy_ref_count(pmcs_phy_t *phyp)
{
	ASSERT(phyp->ref_count != 0);
	atomic_dec_32(&phyp->ref_count);
}
7799
/*
 * pmcs_reap_dead_phy
 *
 * This function is called from pmcs_new_tport when we have a PHY
 * without a target pointer. It's possible in that case that this PHY
 * may have a "brother" on the dead_phys list. That is, it may be the same as
 * this one but with a different root PHY number (e.g. pp05 vs. pp04). If
 * that's the case, update the dead PHY and this new PHY. If that's not the
 * case, we should get a tran_tgt_init on this after it's reported to SCSA.
 *
 * Called with PHY locked.
 */
static void
pmcs_reap_dead_phy(pmcs_phy_t *phyp)
{
	pmcs_hw_t *pwp = phyp->pwp;
	pmcs_phy_t *ctmp;
	pmcs_iport_t *iport_cmp;

	ASSERT(mutex_owned(&phyp->phy_lock));

	/*
	 * Check the dead PHYs list
	 */
	mutex_enter(&pwp->dead_phylist_lock);
	ctmp = pwp->dead_phys;
	while (ctmp) {
		/*
		 * If the iport is NULL, compare against last_iport.
		 */
		if (ctmp->iport) {
			iport_cmp = ctmp->iport;
		} else {
			iport_cmp = ctmp->last_iport;
		}

		/* Skip PHYs on a different iport or with a different SAS address */
		if ((iport_cmp != phyp->iport) ||
		    (memcmp((void *)&ctmp->sas_address[0],
		    (void *)&phyp->sas_address[0], 8))) {
			ctmp = ctmp->dead_next;
			continue;
		}

		/*
		 * Same SAS address on same iport. Now check to see if
		 * the PHY path is the same with the possible exception
		 * of the root PHY number.
		 * The "5" is the string length of "pp00."
		 */
		if ((strnlen(phyp->path, 5) >= 5) &&
		    (strnlen(ctmp->path, 5) >= 5)) {
			if (memcmp((void *)&phyp->path[5],
			    (void *)&ctmp->path[5],
			    strnlen(phyp->path, 32) - 5) == 0) {
				break;
			}
		}

		ctmp = ctmp->dead_next;
	}
	mutex_exit(&pwp->dead_phylist_lock);

	/*
	 * Found a match. Remove the target linkage and drop the
	 * ref count on the old PHY. Then, increment the ref count
	 * on the new PHY to compensate.
	 */
	if (ctmp) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, ctmp, NULL,
		    "%s: Found match in dead PHY list (0x%p) for new PHY %s",
		    __func__, (void *)ctmp, phyp->path);
		/*
		 * If there is a pointer to the target in the dead PHY, move
		 * all reference counts to the new PHY.
		 */
		if (ctmp->target) {
			mutex_enter(&ctmp->target->statlock);
			phyp->target = ctmp->target;

			/* Transfer every outstanding reference, one by one */
			while (ctmp->ref_count != 0) {
				pmcs_inc_phy_ref_count(phyp);
				pmcs_dec_phy_ref_count(ctmp);
			}
			/*
			 * Update the target's linkage as well
			 */
			phyp->target->phy = phyp;
			phyp->target->dtype = phyp->dtype;
			ctmp->target = NULL;
			/*
			 * phyp->target == ctmp's old target here, so this
			 * releases the same statlock acquired above.
			 */
			mutex_exit(&phyp->target->statlock);
		}
	}
}
7893
/*
 * Add a PHY to an iport's PHY list, update the iport's PHY count and the
 * corresponding SM-HBA property, create the PHY's kstats, and take a hold
 * on the iport for the lifetime of the association.
 *
 * Called with iport lock held
 */
void
pmcs_add_phy_to_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp)
{
	ASSERT(mutex_owned(&iport->lock));
	ASSERT(phyp);
	/* The PHY must not already be on any iport's list */
	ASSERT(!list_link_active(&phyp->list_node));

	iport->nphy++;
	list_insert_tail(&iport->phys, phyp);
	/* Publish the updated PHY count as an SM-HBA property */
	pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
	    &iport->nphy);
	/* kstat creation is done under the PHY's own lock */
	mutex_enter(&phyp->phy_lock);
	pmcs_create_one_phy_stats(iport, phyp);
	mutex_exit(&phyp->phy_lock);
	pmcs_hold_iport(iport);
}
7913
/*
 * Remove one PHY (or, when phyp is NULL, every PHY) from an iport: delete
 * its kstats, sever the PHY<->iport linkage, update the PM properties and
 * PHY-count SM-HBA property, and release the hold taken when it was added.
 *
 * Called with the iport lock held
 */
void
pmcs_remove_phy_from_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp)
{
	pmcs_phy_t *pptr, *next_pptr;

	ASSERT(mutex_owned(&iport->lock));

	/*
	 * If phyp is NULL, remove all PHYs from the iport
	 */
	if (phyp == NULL) {
		for (pptr = list_head(&iport->phys); pptr != NULL;
		    pptr = next_pptr) {
			/* Capture successor before unlinking pptr */
			next_pptr = list_next(&iport->phys, pptr);
			mutex_enter(&pptr->phy_lock);
			if (pptr->phy_stats != NULL) {
				kstat_delete(pptr->phy_stats);
				pptr->phy_stats = NULL;
			}
			pptr->iport = NULL;
			/* Clear this PHY's port PM bits from its properties */
			pmcs_update_phy_pm_props(pptr, pptr->att_port_pm_tmp,
			    pptr->tgt_port_pm_tmp, B_FALSE);
			mutex_exit(&pptr->phy_lock);
			pmcs_rele_iport(iport);
			list_remove(&iport->phys, pptr);
			pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32,
			    PMCS_NUM_PHYS, &iport->nphy);
		}
		iport->nphy = 0;
		return;
	}

	/* Single-PHY removal: caller holds no PHY lock requirements here */
	ASSERT(phyp);
	ASSERT(iport->nphy > 0);
	ASSERT(list_link_active(&phyp->list_node));
	iport->nphy--;
	list_remove(&iport->phys, phyp);
	pmcs_update_phy_pm_props(phyp, phyp->att_port_pm_tmp,
	    phyp->tgt_port_pm_tmp, B_FALSE);
	pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
	    &iport->nphy);
	pmcs_rele_iport(iport);
}
7960
7961 /*
7962 * This function checks to see if the target pointed to by phyp is still
7963 * correct. This is done by comparing the target's unit address with the
7964 * SAS address in phyp.
7965 *
7966 * Called with PHY locked and target statlock held
7967 */
7968 static boolean_t
pmcs_phy_target_match(pmcs_phy_t * phyp)7969 pmcs_phy_target_match(pmcs_phy_t *phyp)
7970 {
7971 uint64_t wwn;
7972 char unit_address[PMCS_MAX_UA_SIZE];
7973 boolean_t rval = B_FALSE;
7974
7975 ASSERT(phyp);
7976 ASSERT(phyp->target);
7977 ASSERT(mutex_owned(&phyp->phy_lock));
7978 ASSERT(mutex_owned(&phyp->target->statlock));
7979
7980 wwn = pmcs_barray2wwn(phyp->sas_address);
7981 (void) scsi_wwn_to_wwnstr(wwn, 1, unit_address);
7982
7983 if (memcmp((void *)unit_address, (void *)phyp->target->unit_address,
7984 strnlen(phyp->target->unit_address, PMCS_MAX_UA_SIZE)) == 0) {
7985 rval = B_TRUE;
7986 }
7987
7988 return (rval);
7989 }
7990 /*
7991 * Commands used to serialize SMP requests.
7992 *
7993 * The SPC only allows 2 SMP commands per SMP target: 1 cmd pending and 1 cmd
7994 * queued for the same SMP target. If a third SMP cmd is sent to the SPC for an
7995 * SMP target that already has a SMP cmd pending and one queued, then the
7996 * SPC responds with the ERROR_INTERNAL_SMP_RESOURCE response.
7997 *
7998 * Additionally, the SPC has an 8 entry deep cmd queue and the number of SMP
7999 * cmds that can be queued is controlled by the PORT_CONTROL IOMB. The
8000 * SPC default is 1 SMP command/port (iport). These 2 queued SMP cmds would
8001 * have to be for different SMP targets. The INTERNAL_SMP_RESOURCE error will
8002 * also be returned if a 2nd SMP cmd is sent to the controller when there is
8003 * already 1 SMP cmd queued for that port or if a 3rd SMP cmd is sent to the
8004 * queue if there are already 2 queued SMP cmds.
8005 */
8006 void
pmcs_smp_acquire(pmcs_iport_t * iport)8007 pmcs_smp_acquire(pmcs_iport_t *iport)
8008 {
8009 if (iport == NULL) {
8010 return;
8011 }
8012
8013 mutex_enter(&iport->smp_lock);
8014 while (iport->smp_active) {
8015 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
8016 "%s: SMP is active on thread 0x%p, waiting", __func__,
8017 (void *)iport->smp_active_thread);
8018 cv_wait(&iport->smp_cv, &iport->smp_lock);
8019 }
8020 iport->smp_active = B_TRUE;
8021 iport->smp_active_thread = curthread;
8022 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG3, NULL, NULL,
8023 "%s: SMP acquired by thread 0x%p", __func__,
8024 (void *)iport->smp_active_thread);
8025 mutex_exit(&iport->smp_lock);
8026 }
8027
8028 void
pmcs_smp_release(pmcs_iport_t * iport)8029 pmcs_smp_release(pmcs_iport_t *iport)
8030 {
8031 if (iport == NULL) {
8032 return;
8033 }
8034
8035 mutex_enter(&iport->smp_lock);
8036 pmcs_prt(iport->pwp, PMCS_PRT_DEBUG3, NULL, NULL,
8037 "%s: SMP released by thread 0x%p", __func__, (void *)curthread);
8038 iport->smp_active = B_FALSE;
8039 iport->smp_active_thread = NULL;
8040 cv_signal(&iport->smp_cv);
8041 mutex_exit(&iport->smp_lock);
8042 }
8043
/*
 * Update a PHY's attached-port-pm and target-port-pm properties
 *
 * phyp: PHY whose properties are to be updated
 *
 * att_bv: Bit value of the attached-port-pm property to be updated in the
 * 64-bit holding area for the PHY.
 *
 * tgt_bv: Bit value of the target-port-pm property to update in the 64-bit
 * holding area for the PHY.
 *
 * prop_add_val: If TRUE, we're adding bits into the property value.
 * Otherwise, we're taking them out. Either way, the properties for this
 * PHY will be updated.
 */
void
pmcs_update_phy_pm_props(pmcs_phy_t *phyp, uint64_t att_bv, uint64_t tgt_bv,
    boolean_t prop_add_val)
{
	pmcs_xscsi_t *tgt;

	if (prop_add_val) {
		/*
		 * If the values are currently 0, then we're setting the
		 * phymask for just this PHY as well.
		 */
		if (phyp->att_port_pm_tmp == 0) {
			phyp->att_port_pm = att_bv;
			phyp->tgt_port_pm = tgt_bv;
		}
		/* OR the new bits in and re-render the string forms */
		phyp->att_port_pm_tmp |= att_bv;
		phyp->tgt_port_pm_tmp |= tgt_bv;
		(void) snprintf(phyp->att_port_pm_str, PMCS_PM_MAX_NAMELEN,
		    "%"PRIx64, phyp->att_port_pm_tmp);
		(void) snprintf(phyp->tgt_port_pm_str, PMCS_PM_MAX_NAMELEN,
		    "%"PRIx64, phyp->tgt_port_pm_tmp);
	} else {
		/* Clear the requested bits; empty masks get empty strings */
		phyp->att_port_pm_tmp &= ~att_bv;
		phyp->tgt_port_pm_tmp &= ~tgt_bv;
		if (phyp->att_port_pm_tmp) {
			(void) snprintf(phyp->att_port_pm_str,
			    PMCS_PM_MAX_NAMELEN, "%"PRIx64,
			    phyp->att_port_pm_tmp);
		} else {
			phyp->att_port_pm_str[0] = '\0';
			phyp->att_port_pm = 0;
		}
		if (phyp->tgt_port_pm_tmp) {
			(void) snprintf(phyp->tgt_port_pm_str,
			    PMCS_PM_MAX_NAMELEN, "%"PRIx64,
			    phyp->tgt_port_pm_tmp);
		} else {
			phyp->tgt_port_pm_str[0] = '\0';
			phyp->tgt_port_pm = 0;
		}
	}

	/*
	 * Find the target whose device properties must be refreshed:
	 * prefer the indirect target_addr linkage, fall back to the
	 * direct target pointer, and bail if neither exists.
	 */
	if ((phyp->target_addr) && (*phyp->target_addr != NULL)) {
		tgt = *phyp->target_addr;
	} else if (phyp->target != NULL) {
		tgt = phyp->target;
	} else {
		return;
	}

	mutex_enter(&tgt->statlock);
	if (!list_is_empty(&tgt->lun_list)) {
		/* SCSI target: push the strings onto every LUN's device node */
		pmcs_lun_t *lunp;

		lunp = list_head(&tgt->lun_list);
		while (lunp) {
			(void) scsi_device_prop_update_string(lunp->sd,
			    SCSI_DEVICE_PROP_PATH,
			    SCSI_ADDR_PROP_ATTACHED_PORT_PM,
			    phyp->att_port_pm_str);
			(void) scsi_device_prop_update_string(lunp->sd,
			    SCSI_DEVICE_PROP_PATH,
			    SCSI_ADDR_PROP_TARGET_PORT_PM,
			    phyp->tgt_port_pm_str);
			lunp = list_next(&tgt->lun_list, lunp);
		}
	} else if (tgt->smpd) {
		/* SMP (expander) target: update the smp_device instead */
		(void) smp_device_prop_update_string(tgt->smpd,
		    SCSI_ADDR_PROP_ATTACHED_PORT_PM,
		    phyp->att_port_pm_str);
		(void) smp_device_prop_update_string(tgt->smpd,
		    SCSI_ADDR_PROP_TARGET_PORT_PM,
		    phyp->tgt_port_pm_str);
	}
	mutex_exit(&tgt->statlock);
}
8135
8136 /* ARGSUSED */
8137 void
pmcs_deregister_device_work(pmcs_hw_t * pwp,pmcs_phy_t * phyp)8138 pmcs_deregister_device_work(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
8139 {
8140 pmcs_phy_t *pptr;
8141
8142 for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
8143 pmcs_lock_phy(pptr);
8144 if (pptr->deregister_wait) {
8145 pmcs_deregister_device(pwp, pptr);
8146 }
8147 pmcs_unlock_phy(pptr);
8148 }
8149 }
8150
/*
 * pmcs_iport_active
 *
 * Mark this iport as active. Called with the iport lock held.
 * Also resets the SMP serialization state (see pmcs_smp_acquire/release)
 * so the newly-active iport starts with no SMP command in flight.
 */
static void
pmcs_iport_active(pmcs_iport_t *iport)
{
	ASSERT(mutex_owned(&iport->lock));

	iport->ua_state = UA_ACTIVE;
	iport->smp_active = B_FALSE;
	iport->smp_active_thread = NULL;
}
8165
/*
 * scsi_hba_tgtmap activate callback: invoked by SCSA when a target address
 * on this iport's map becomes active.  tgt_type and tgt_privp are unused
 * (ARGSUSED).
 */
/* ARGSUSED */
static void
pmcs_tgtmap_activate_cb(void *tgtmap_priv, char *tgt_addr,
    scsi_tgtmap_tgt_type_t tgt_type, void **tgt_privp)
{
	pmcs_iport_t *iport = (pmcs_iport_t *)tgtmap_priv;
	pmcs_hw_t *pwp = iport->pwp;
	pmcs_xscsi_t *target;

	/*
	 * Look up the target. If there is one, and it doesn't have a PHY
	 * pointer, re-establish that linkage here.
	 */
	mutex_enter(&pwp->lock);
	target = pmcs_get_target(iport, tgt_addr, B_FALSE);
	mutex_exit(&pwp->lock);

	/*
	 * If we got a target, it will now have a PHY pointer and the PHY
	 * will point to the target. The PHY will be locked, so we'll need
	 * to unlock it.
	 */
	if (target != NULL) {
		pmcs_unlock_phy(target->phy);
	}

	/*
	 * Update config_restart_time so we don't try to restart discovery
	 * while enumeration is still in progress.
	 */
	mutex_enter(&pwp->config_lock);
	pwp->config_restart_time = ddi_get_lbolt() +
	    drv_usectohz(PMCS_REDISCOVERY_DELAY);
	mutex_exit(&pwp->config_lock);
}
8201
/*
 * scsi_hba_tgtmap deactivate callback: invoked by SCSA when a target
 * address on this iport's map goes away.  Decides whether the PHY should
 * be re-enumerated and returns B_TRUE if rediscovery should be kicked off
 * (for this PHY or any other PHY that already requested it).
 * tgt_type, tgt_priv and tgt_deact_rsn are unused (ARGSUSED).
 */
/* ARGSUSED */
static boolean_t
pmcs_tgtmap_deactivate_cb(void *tgtmap_priv, char *tgt_addr,
    scsi_tgtmap_tgt_type_t tgt_type, void *tgt_priv,
    scsi_tgtmap_deact_rsn_t tgt_deact_rsn)
{
	pmcs_iport_t *iport = (pmcs_iport_t *)tgtmap_priv;
	pmcs_phy_t *phyp;
	boolean_t rediscover = B_FALSE;

	ASSERT(iport);

	phyp = pmcs_find_phy_by_sas_address(iport->pwp, iport, NULL, tgt_addr);
	if (phyp == NULL) {
		pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
		    "%s: Couldn't find PHY at %s", __func__, tgt_addr);
		return (rediscover);
	}
	/* phyp is locked */

	/*
	 * A still-configured PHY being deactivated means the device
	 * dropped out from under us; flag it for re-enumeration.
	 */
	if (!phyp->reenumerate && phyp->configured) {
		pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp, phyp->target,
		    "%s: PHY @ %s is configured... re-enumerate", __func__,
		    tgt_addr);
		phyp->reenumerate = 1;
	}

	/*
	 * Check to see if reenumerate is set, and if so, if we've reached our
	 * maximum number of retries.
	 */
	if (phyp->reenumerate) {
		if (phyp->enum_attempts == PMCS_MAX_REENUMERATE) {
			pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp,
			    phyp->target,
			    "%s: No more enumeration attempts for %s", __func__,
			    tgt_addr);
		} else {
			pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, phyp,
			    phyp->target, "%s: Re-attempt enumeration for %s",
			    __func__, tgt_addr);
			++phyp->enum_attempts;
			rediscover = B_TRUE;
		}

		phyp->reenumerate = 0;
	}

	pmcs_unlock_phy(phyp);

	/* Delay any discovery restart until enumeration settles */
	mutex_enter(&iport->pwp->config_lock);
	iport->pwp->config_restart_time = ddi_get_lbolt() +
	    drv_usectohz(PMCS_REDISCOVERY_DELAY);
	if (rediscover) {
		iport->pwp->config_restart = B_TRUE;
	} else if (iport->pwp->config_restart == B_TRUE) {
		/*
		 * If we aren't asking for rediscovery because of this PHY,
		 * check to see if we're already asking for it on behalf of
		 * some other PHY. If so, we'll want to return TRUE, so reset
		 * "rediscover" here.
		 */
		rediscover = B_TRUE;
	}

	mutex_exit(&iport->pwp->config_lock);

	return (rediscover);
}
8271
8272 void
pmcs_status_disposition(pmcs_phy_t * phyp,uint32_t status)8273 pmcs_status_disposition(pmcs_phy_t *phyp, uint32_t status)
8274 {
8275 ASSERT(phyp);
8276 ASSERT(!mutex_owned(&phyp->phy_lock));
8277
8278 if (phyp == NULL) {
8279 return;
8280 }
8281
8282 pmcs_lock_phy(phyp);
8283
8284 /*
8285 * XXX: Do we need to call this function from an SSP_EVENT?
8286 */
8287
8288 switch (status) {
8289 case PMCOUT_STATUS_NO_DEVICE:
8290 case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
8291 case PMCOUT_STATUS_XFER_ERR_BREAK:
8292 case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
8293 case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
8294 case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
8295 case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
8296 case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
8297 case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
8298 case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
8299 case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
8300 case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR:
8301 case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
8302 case PMCOUT_STATUS_XFER_ERROR_RX_FRAME:
8303 case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
8304 case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
8305 case PMCOUT_STATUS_IO_PORT_IN_RESET:
8306 case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
8307 case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
8308 case PMCOUT_STATUS_IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
8309 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
8310 "%s: status = 0x%x for " SAS_ADDR_FMT ", reenumerate",
8311 __func__, status, SAS_ADDR_PRT(phyp->sas_address));
8312 phyp->reenumerate = 1;
8313 break;
8314
8315 default:
8316 pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
8317 "%s: status = 0x%x for " SAS_ADDR_FMT ", no reenumeration",
8318 __func__, status, SAS_ADDR_PRT(phyp->sas_address));
8319 break;
8320 }
8321
8322 pmcs_unlock_phy(phyp);
8323 }
8324
8325 /*
8326 * Add the list of PHYs pointed to by phyp to the dead_phys_list
8327 *
8328 * Called with all PHYs in the list locked
8329 */
8330 static void
pmcs_add_dead_phys(pmcs_hw_t * pwp,pmcs_phy_t * phyp)8331 pmcs_add_dead_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
8332 {
8333 mutex_enter(&pwp->dead_phylist_lock);
8334 while (phyp) {
8335 pmcs_phy_t *nxt = phyp->sibling;
8336 ASSERT(phyp->dead);
8337 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, NULL,
8338 "%s: dead PHY 0x%p (%s) (ref_count %d)", __func__,
8339 (void *)phyp, phyp->path, phyp->ref_count);
8340 /*
8341 * Put this PHY on the dead PHY list for the watchdog to
8342 * clean up after any outstanding work has completed.
8343 */
8344 phyp->dead_next = pwp->dead_phys;
8345 pwp->dead_phys = phyp;
8346 pmcs_unlock_phy(phyp);
8347 phyp = nxt;
8348 }
8349 mutex_exit(&pwp->dead_phylist_lock);
8350 }
8351
/*
 * Read the firmware and ILA version information out of the chip and record
 * it in the softstate (pwp->fw, pwp->ila_ver, pwp->fw_active_img).
 */
static void
pmcs_get_fw_version(pmcs_hw_t *pwp)
{
	uint32_t ila_len, ver_hi, ver_lo;
	uint8_t ila_ver_string[9], img_flag;
	char uc, *ucp = &uc;
	unsigned long ila_ver;
	uint64_t ver_hilo;

	/* Firmware version is easy. */
	pwp->fw = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FW);

	/*
	 * Get the image size (2nd to last dword)
	 * NOTE: The GSM registers are mapped little-endian, but the data
	 * on the flash is actually big-endian, so we need to swap these values
	 * regardless of which platform we're on.
	 */
	ila_len = BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER,
	    GSM_FLASH_BASE + GSM_SM_BLKSZ - (2 << 2)));
	if (ila_len > 65535) {
		/* Sanity check: a huge size means we read garbage */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Invalid ILA image size (0x%x)?", __func__, ila_len);
		return;
	}

	/*
	 * The numeric version is at ila_len - PMCS_ILA_VER_OFFSET
	 */
	ver_hi = BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER,
	    GSM_FLASH_BASE + ila_len - PMCS_ILA_VER_OFFSET));
	ver_lo = BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER,
	    GSM_FLASH_BASE + ila_len - PMCS_ILA_VER_OFFSET + 4));
	/*
	 * Reassemble the 8 version bytes into a NUL-terminated ASCII hex
	 * string, then parse it into the numeric ILA version.
	 */
	ver_hilo = BE_64(((uint64_t)ver_hi << 32) | ver_lo);
	bcopy((const void *)&ver_hilo, &ila_ver_string[0], 8);
	ila_ver_string[8] = '\0';

	(void) ddi_strtoul((const char *)ila_ver_string, &ucp, 16, &ila_ver);
	pwp->ila_ver = (int)(ila_ver & 0xffffffff);

	/*
	 * The active-image flag lives in the top byte of the flash image
	 * flags word; bit PMCS_IMG_FLAG_A selects image A.
	 */
	img_flag = (BSWAP_32(pmcs_rd_gsm_reg(pwp, GSM_FLASH_BASE_UPPER,
	    GSM_FLASH_IMG_FLAGS)) & 0xff000000) >> 24;
	if (img_flag & PMCS_IMG_FLAG_A) {
		pwp->fw_active_img = 1;
	} else {
		pwp->fw_active_img = 0;
	}
}
8400