/*
 * O.S : Solaris
 * FILE NAME : arcmsr.c
 * BY : Erich Chen, C.L. Huang
 * Description: SCSI RAID Device Driver for
 * ARECA RAID Host adapter
 *
 * Copyright (C) 2002,2010 Areca Technology Corporation All rights reserved.
 * Copyright (C) 2002,2010 Erich Chen
 * Web site: www.areca.com.tw
 * E-mail: erich@areca.com.tw; ching2048@areca.com.tw
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * 3. The party using or redistributing the source code and binary forms
 * agrees to the disclaimer below and the terms and conditions set forth
 * herein.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */
/*
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 */
#include <sys/types.h>
#include <sys/ddidmareq.h>
#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/disp.h>
#include <sys/signal.h>
#include <sys/debug.h>
#include <sys/pci.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include "arcmsr.h"

static int arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd);
static int arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg,
    int mode, cred_t *credp, int *rvalp);
static int arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd);
static int arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd);
static int arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int arcmsr_tran_reset(struct scsi_address *ap, int level);
static int arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
static int arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static int arcmsr_tran_tgt_init(dev_info_t *host_dev_info,
    dev_info_t *target_dev_info, scsi_hba_tran_t *hosttran,
    struct scsi_device *sd);
static void arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void arcmsr_tran_destroy_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static void arcmsr_tran_sync_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static struct scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static int arcmsr_config_child(struct ACB *acb, struct scsi_device *sd,
    dev_info_t **dipp);

static int arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun,
    dev_info_t **ldip);
static uint8_t arcmsr_abort_host_command(struct ACB *acb);
static uint8_t arcmsr_get_echo_from_iop(struct ACB *acb);
static uint_t arcmsr_intr_handler(caddr_t arg, caddr_t arg2);
static int arcmsr_initialize(struct ACB *acb);
static int arcmsr_dma_alloc(struct ACB *acb,
    struct scsi_pkt *pkt, struct buf *bp, int flags, int (*callback)());
static int arcmsr_dma_move(struct ACB *acb,
    struct scsi_pkt *pkt, struct buf *bp);
static void arcmsr_handle_iop_bus_hold(struct ACB *acb);
static void arcmsr_hbc_message_isr(struct ACB *acb);
static void arcmsr_pcidev_disattach(struct ACB *acb);
static void arcmsr_ccb_complete(struct CCB *ccb, int flag);
static void arcmsr_iop_init(struct ACB *acb);
static void arcmsr_iop_parking(struct ACB *acb);
/*PRINTFLIKE3*/
static void arcmsr_log(struct ACB *acb, int level, char *fmt, ...);
/*PRINTFLIKE2*/
static void arcmsr_warn(struct ACB *acb, char *fmt, ...);
static void arcmsr_mutex_init(struct ACB *acb);
static void arcmsr_remove_intr(struct ACB *acb);
static void arcmsr_ccbs_timeout(void *arg);
static void arcmsr_devMap_monitor(void *arg);
static void arcmsr_iop_message_read(struct ACB *acb);
static void arcmsr_free_ccb(struct CCB *ccb);
static void arcmsr_post_ioctldata2iop(struct ACB *acb);
static void arcmsr_report_sense_info(struct CCB *ccb);
static void arcmsr_init_list_head(struct list_head *list);
static void arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org);
static void arcmsr_done4abort_postqueue(struct ACB *acb);
static void arcmsr_list_add_tail(kmutex_t *list_lock,
    struct list_head *new_one, struct list_head *head);
static int arcmsr_name_node(dev_info_t *dip, char *name, int len);
static int arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt);
static int arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt);
static int arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb);
static int arcmsr_parse_devname(char *devnm, int *tgt, int *lun);
static int arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance);
static uint8_t arcmsr_iop_reset(struct ACB *acb);
static uint32_t arcmsr_disable_allintr(struct ACB *acb);
static uint32_t arcmsr_iop_confirm(struct ACB *acb);
static struct CCB *arcmsr_get_freeccb(struct ACB *acb);
static void arcmsr_flush_hba_cache(struct ACB *acb);
static void arcmsr_flush_hbb_cache(struct ACB *acb);
static void arcmsr_flush_hbc_cache(struct ACB *acb);
static void arcmsr_stop_hba_bgrb(struct ACB *acb);
static void arcmsr_stop_hbb_bgrb(struct ACB *acb);
static void arcmsr_stop_hbc_bgrb(struct ACB *acb);
static void arcmsr_start_hba_bgrb(struct ACB *acb);
static void arcmsr_start_hbb_bgrb(struct ACB *acb);
static void arcmsr_start_hbc_bgrb(struct ACB *acb);
static void arcmsr_mutex_destroy(struct ACB *acb);
static void arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_build_ccb(struct CCB *ccb);
static int arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
static dev_info_t *arcmsr_find_child(struct ACB *acb, uint16_t tgt,
    uint8_t lun);
static struct QBUFFER *arcmsr_get_iop_rqbuffer(struct ACB *acb);

static int arcmsr_add_intr(struct ACB *, int);

static void *arcmsr_soft_state = NULL;

static ddi_dma_attr_t arcmsr_dma_attr = {
	DMA_ATTR_V0,		/* ddi_dma_attr version */
	0,			/* low DMA address range */
	0xffffffffffffffffull,	/* high DMA address range */
	0x00ffffff,		/* DMA counter register upper bound */
	1,			/* DMA address alignment requirements */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* burst sizes */
	1,			/* minimum effective DMA size */
	ARCMSR_MAX_XFER_LEN,	/* maximum DMA xfer size */
	/*
	 * The dma_attr_seg field supplies the limit of each Scatter/Gather
	 * list element's "address+length". The Intel IOP331 cannot use
	 * segments over the 4G boundary due to segment boundary restrictions.
	 */
	0xffffffff,
	ARCMSR_MAX_SG_ENTRIES,	/* scatter/gather list count */
	1,			/* device granularity */
	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
};


static ddi_dma_attr_t arcmsr_ccb_attr = {
	DMA_ATTR_V0,	/* ddi_dma_attr version */
	0,		/* low DMA address range */
	0xffffffff,	/* high DMA address range */
	0x00ffffff,	/* DMA counter register upper bound */
	1,		/* default byte alignment */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* burst sizes */
	1,		/* minimum effective DMA size */
	0xffffffff,	/* maximum DMA xfer size */
	0x00ffffff,	/* max segment size, segment boundary restrictions */
	1,		/* scatter/gather list count */
	1,		/* device granularity */
	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
};
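
/*
 * Two DMA attribute sets are used: arcmsr_dma_attr describes data-buffer
 * mappings (64-bit addresses, up to ARCMSR_MAX_SG_ENTRIES scatter/gather
 * entries), while arcmsr_ccb_attr describes the single physically
 * contiguous CCB pool, which must live below 4GB and may not be split
 * into multiple segments.
 */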

static struct cb_ops arcmsr_cb_ops = {
	scsi_hba_open,		/* open(9E) */
	scsi_hba_close,		/* close(9E) */
	nodev,			/* strategy(9E), returns ENXIO */
	nodev,			/* print(9E) */
	nodev,			/* dump(9E) Cannot be used as a dump device */
	nodev,			/* read(9E) */
	nodev,			/* write(9E) */
	arcmsr_cb_ioctl,	/* ioctl(9E) */
	nodev,			/* devmap(9E) */
	nodev,			/* mmap(9E) */
	nodev,			/* segmap(9E) */
	NULL,			/* chpoll(9E) returns ENXIO */
	nodev,			/* prop_op(9E) */
	NULL,			/* streamtab(9S) */
	D_MP,
	CB_REV,
	nodev,			/* aread(9E) */
	nodev			/* awrite(9E) */
};

static struct dev_ops arcmsr_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* reference count */
	nodev,			/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	arcmsr_attach,		/* attach */
	arcmsr_detach,		/* detach */
	arcmsr_reset,		/* reset, shutdown, reboot notify */
	&arcmsr_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	NULL			/* power */
};

static struct modldrv arcmsr_modldrv = {
	&mod_driverops,			/* Type of module. This is a driver. */
	"ARECA RAID Controller",	/* module name, from arcmsr.h */
	&arcmsr_ops,			/* driver ops */
};

static struct modlinkage arcmsr_modlinkage = {
	MODREV_1,
	&arcmsr_modldrv,
	NULL
};


int
_init(void)
{
	int ret;

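	/*
	 * Initialization order matters here: the soft state and the SCSA
	 * framework must be set up before mod_install() makes the driver
	 * visible, and a mod_install() failure unwinds them in reverse.
	 */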
	ret = ddi_soft_state_init(&arcmsr_soft_state, sizeof (struct ACB), 1);
	if (ret != 0) {
		return (ret);
	}
	if ((ret = scsi_hba_init(&arcmsr_modlinkage)) != 0) {
		ddi_soft_state_fini(&arcmsr_soft_state);
		return (ret);
	}

	if ((ret = mod_install(&arcmsr_modlinkage)) != 0) {
		scsi_hba_fini(&arcmsr_modlinkage);
		if (arcmsr_soft_state != NULL) {
			ddi_soft_state_fini(&arcmsr_soft_state);
		}
	}
	return (ret);
}


int
_fini(void)
{
	int ret;

	ret = mod_remove(&arcmsr_modlinkage);
	if (ret == 0) {
		/* if ret == 0, the driver is ready to be removed */
		scsi_hba_fini(&arcmsr_modlinkage);
		if (arcmsr_soft_state != NULL) {
			ddi_soft_state_fini(&arcmsr_soft_state);
		}
	}
	return (ret);
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&arcmsr_modlinkage, modinfop));
}

/*
 * Function: arcmsr_attach(9E)
 * Description: Set up all device state and allocate data structures,
 *		mutexes, condition variables, etc. for device operation.
 *		Set mt_attr property for driver to indicate MT-safety.
 *		Add interrupts needed.
 * Input: dev_info_t *dev_info, ddi_attach_cmd_t cmd
 * Output: Return DDI_SUCCESS if device is ready,
 *		else return DDI_FAILURE
 */
static int
arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd)
{
	scsi_hba_tran_t *hba_trans;
	struct ACB *acb;

	switch (cmd) {
	case DDI_ATTACH:
		return (arcmsr_do_ddi_attach(dev_info,
		    ddi_get_instance(dev_info)));
	case DDI_RESUME:
		/*
		 * There is no hardware state to restart and no timeouts to
		 * restart since we didn't DDI_SUSPEND with active cmds or
		 * active timeouts.  We just need to unblock waiting threads
		 * and restart I/O.
		 */
		hba_trans = ddi_get_driver_private(dev_info);
		if (hba_trans == NULL) {
			return (DDI_FAILURE);
		}
		acb = hba_trans->tran_hba_private;
		mutex_enter(&acb->acb_mutex);
		arcmsr_iop_init(acb);

		/* restart ccbs "timeout" watchdog */
		acb->timeout_count = 0;
		acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
		    (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
		acb->timeout_sc_id = timeout(arcmsr_devMap_monitor,
		    (caddr_t)acb,
		    (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
		mutex_exit(&acb->acb_mutex);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Function: arcmsr_detach(9E)
 * Description: Remove all device allocation and system resources, disable
 *		device interrupt.
 * Input: dev_info_t *dev_info
 *	  ddi_detach_cmd_t cmd
 * Output: Return DDI_SUCCESS if done,
 *	   else return DDI_FAILURE
 */
static int
arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd)
{
	int instance;
	struct ACB *acb;


	instance = ddi_get_instance(dev_info);
	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
	if (acb == NULL)
		return (DDI_FAILURE);

	switch (cmd) {
	case DDI_DETACH:
		mutex_enter(&acb->acb_mutex);
		if (acb->timeout_id != 0) {
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_id = 0;
		}
		if (acb->timeout_sc_id != 0) {
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_sc_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_sc_id = 0;
		}
		arcmsr_pcidev_disattach(acb);
		/* Remove interrupt set up by ddi_add_intr */
		arcmsr_remove_intr(acb);
		/* unbind mapping object to handle */
		(void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
		/* Free ccb pool memory */
		ddi_dma_mem_free(&acb->ccbs_acc_handle);
		/* Free DMA handle */
		ddi_dma_free_handle(&acb->ccbs_pool_handle);
		ddi_regs_map_free(&acb->reg_mu_acc_handle0);
		if (scsi_hba_detach(dev_info) != DDI_SUCCESS)
			arcmsr_warn(acb, "Unable to detach instance cleanly "
			    "(should not happen)");
		/* free scsi_hba_transport from scsi_hba_tran_alloc */
		scsi_hba_tran_free(acb->scsi_hba_transport);
		ddi_taskq_destroy(acb->taskq);
		ddi_prop_remove_all(dev_info);
		mutex_exit(&acb->acb_mutex);
		arcmsr_mutex_destroy(acb);
		pci_config_teardown(&acb->pci_acc_handle);
		ddi_set_driver_private(dev_info, NULL);
		ddi_soft_state_free(arcmsr_soft_state, instance);
		return (DDI_SUCCESS);
	case DDI_SUSPEND:
		mutex_enter(&acb->acb_mutex);
		if (acb->timeout_id != 0) {
			acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_id = 0;
		}

		if (acb->timeout_sc_id != 0) {
			acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_sc_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_sc_id = 0;
		}

		/* disable all outbound interrupts */
		(void) arcmsr_disable_allintr(acb);
		/* stop adapter background rebuild */
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A:
			arcmsr_stop_hba_bgrb(acb);
			arcmsr_flush_hba_cache(acb);
			break;

		case ACB_ADAPTER_TYPE_B:
			arcmsr_stop_hbb_bgrb(acb);
			arcmsr_flush_hbb_cache(acb);
			break;

		case ACB_ADAPTER_TYPE_C:
			arcmsr_stop_hbc_bgrb(acb);
			arcmsr_flush_hbc_cache(acb);
			break;
		}
		mutex_exit(&acb->acb_mutex);
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}

static int
arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd)
{
	struct ACB *acb;
	scsi_hba_tran_t *scsi_hba_transport;
	_NOTE(ARGUNUSED(cmd));

	scsi_hba_transport = ddi_get_driver_private(resetdev);
	if (scsi_hba_transport == NULL)
		return (DDI_FAILURE);

	acb = (struct ACB *)scsi_hba_transport->tran_hba_private;
	if (!acb)
		return (DDI_FAILURE);

	arcmsr_pcidev_disattach(acb);

	return (DDI_SUCCESS);
}

static int
arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	struct ACB *acb;
	struct CMD_MESSAGE_FIELD *pktioctlfld;
	int retvalue = 0;
	int instance = MINOR2INST(getminor(dev));

	if (instance < 0)
		return (ENXIO);

	if (secpolicy_sys_config(credp, B_FALSE) != 0)
		return (EPERM);

	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
	if (acb == NULL)
		return (ENXIO);

	pktioctlfld = kmem_zalloc(sizeof (struct CMD_MESSAGE_FIELD), KM_SLEEP);

	mutex_enter(&acb->ioctl_mutex);
	if (ddi_copyin((void *)arg, pktioctlfld,
	    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0) {
		retvalue = ENXIO;
		goto ioctl_out;
	}

	if (memcmp(pktioctlfld->cmdmessage.Signature, "ARCMSR", 6) != 0) {
		/* validity check */
		retvalue = ENXIO;
		goto ioctl_out;
	}

	switch ((unsigned int)ioctl_cmd) {
	case ARCMSR_MESSAGE_READ_RQBUFFER:
	{
		uint8_t *ver_addr;
		uint8_t *pQbuffer, *ptmpQbuffer;
		int32_t allxfer_len = 0;

		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
		ptmpQbuffer = ver_addr;
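		/*
		 * rqbuffer is a circular queue: drain it one byte at a
		 * time into the staging buffer, wrapping the read index
		 * at ARCMSR_MAX_QBUFFER, until the queue is empty or the
		 * MSGDATABUFLEN staging buffer is full.
		 */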
		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
		    (allxfer_len < (MSGDATABUFLEN - 1))) {
			/* copy READ QBUFFER to srb */
			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
			acb->rqbuf_firstidx++;
			/* if last index number set it to 0 */
			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
			ptmpQbuffer++;
			allxfer_len++;
		}

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			struct QBUFFER *prbuffer;
			uint8_t *pQbuffer;
			uint8_t *iop_data;
			int32_t iop_len;

			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			iop_data = (uint8_t *)prbuffer->data;
			iop_len = (int32_t)prbuffer->data_len;
			/*
			 * this IOP data cannot overflow the buffer again
			 * here (we just drained it), so just copy it in
			 */
			while (iop_len > 0) {
				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
				(void) memcpy(pQbuffer, iop_data, 1);
				acb->rqbuf_lastidx++;
				/* if last index number set it to 0 */
				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
				iop_data++;
				iop_len--;
			}
			/* let IOP know data has been read */
			arcmsr_iop_message_read(acb);
		}
		(void) memcpy(pktioctlfld->messagedatabuffer,
		    ver_addr, allxfer_len);
		pktioctlfld->cmdmessage.Length = allxfer_len;
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;

		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;

		kmem_free(ver_addr, MSGDATABUFLEN);
		break;
	}

	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
	{
		uint8_t *ver_addr;
		int32_t my_empty_len, user_len;
		int32_t wqbuf_firstidx, wqbuf_lastidx;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);

		ptmpuserbuffer = ver_addr;
		user_len = min(pktioctlfld->cmdmessage.Length,
		    MSGDATABUFLEN);
		(void) memcpy(ptmpuserbuffer,
		    pktioctlfld->messagedatabuffer, user_len);
		/*
		 * check if the data xfer length of this request will
		 * overflow my array qbuffer
		 */
		wqbuf_lastidx = acb->wqbuf_lastidx;
		wqbuf_firstidx = acb->wqbuf_firstidx;
		if (wqbuf_lastidx != wqbuf_firstidx) {
			arcmsr_post_ioctldata2iop(acb);
			pktioctlfld->cmdmessage.ReturnCode =
			    ARCMSR_MESSAGE_RETURNCODE_ERROR;
		} else {
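			/*
			 * Free space in the circular write queue; this
			 * mask arithmetic assumes ARCMSR_MAX_QBUFFER is
			 * a power of two.  One slot is kept unused so a
			 * full queue can be told apart from an empty one.
			 */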
			my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1)
			    & (ARCMSR_MAX_QBUFFER - 1);
			if (my_empty_len >= user_len) {
				while (user_len > 0) {
					/* copy srb data to wqbuffer */
					pQbuffer =
					    &acb->wqbuffer[acb->wqbuf_lastidx];
					(void) memcpy(pQbuffer,
					    ptmpuserbuffer, 1);
					acb->wqbuf_lastidx++;
					/* if last index number set it to 0 */
					acb->wqbuf_lastidx %=
					    ARCMSR_MAX_QBUFFER;
					ptmpuserbuffer++;
					user_len--;
				}
				/* post first Qbuffer */
				if (acb->acb_flags &
				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
					acb->acb_flags &=
					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
					arcmsr_post_ioctldata2iop(acb);
				}
				pktioctlfld->cmdmessage.ReturnCode =
				    ARCMSR_MESSAGE_RETURNCODE_OK;
			} else {
				pktioctlfld->cmdmessage.ReturnCode =
				    ARCMSR_MESSAGE_RETURNCODE_ERROR;
			}
		}
		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;

		kmem_free(ver_addr, MSGDATABUFLEN);
		break;
	}

	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
	{
		uint8_t *pQbuffer = acb->rqbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_firstidx = 0;
		acb->rqbuf_lastidx = 0;
		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
		/* report success */
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;

		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;
		break;
	}

	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
	{
		uint8_t *pQbuffer = acb->wqbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
		    ACB_F_MESSAGE_WQBUFFER_READ);
		acb->wqbuf_firstidx = 0;
		acb->wqbuf_lastidx = 0;
		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
		/* report success */
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;

		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;
		break;
	}

	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
	{
		uint8_t *pQbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
		    ACB_F_MESSAGE_WQBUFFER_READ);
		acb->rqbuf_firstidx = 0;
		acb->rqbuf_lastidx = 0;
		acb->wqbuf_firstidx = 0;
		acb->wqbuf_lastidx = 0;
		pQbuffer = acb->rqbuffer;
		bzero(pQbuffer, sizeof (struct QBUFFER));
		pQbuffer = acb->wqbuffer;
		bzero(pQbuffer, sizeof (struct QBUFFER));
		/* report success */
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;
		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;
		break;
	}

	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_3F;
		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;
		break;

	/* Not supported: ARCMSR_MESSAGE_SAY_HELLO */
	case ARCMSR_MESSAGE_SAY_GOODBYE:
		arcmsr_iop_parking(acb);
		break;

	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A:
			arcmsr_flush_hba_cache(acb);
			break;
		case ACB_ADAPTER_TYPE_B:
			arcmsr_flush_hbb_cache(acb);
			break;
		case ACB_ADAPTER_TYPE_C:
			arcmsr_flush_hbc_cache(acb);
			break;
		}
		break;

	default:
		mutex_exit(&acb->ioctl_mutex);
		kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
		return (scsi_hba_ioctl(dev, ioctl_cmd, arg, mode, credp,
		    rvalp));
	}

ioctl_out:
	kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
	mutex_exit(&acb->ioctl_mutex);

	return (retvalue);
}


/*
 * Function: arcmsr_tran_tgt_init
 * Description: Called when initializing a target device instance. If
 *		no per-target initialization is required, the HBA
 *		may leave tran_tgt_init set to NULL.
 * Input:
 *		dev_info_t *host_dev_info,
 *		dev_info_t *target_dev_info,
 *		scsi_hba_tran_t *tran,
 *		struct scsi_device *sd
 *
 * Return: DDI_SUCCESS if success, else return DDI_FAILURE
 *
 * This entry point enables the HBA to allocate and/or initialize any
 * per-target resources.
 * It also enables the HBA to qualify the device's address as valid and
 * supportable for that particular HBA.
 * By returning DDI_FAILURE, the instance of the target driver for that
 * device will not be probed or attached.
 * This entry point is not required, and if none is supplied,
 * the framework will attempt to probe and attach all possible instances
 * of the appropriate target drivers.
 */
static int
arcmsr_tran_tgt_init(dev_info_t *host_dev_info, dev_info_t *target_dev_info,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	uint16_t target;
	uint8_t lun;
	struct ACB *acb = tran->tran_hba_private;

	_NOTE(ARGUNUSED(host_dev_info))

	target = sd->sd_address.a_target;
	lun = sd->sd_address.a_lun;
	if ((target >= ARCMSR_MAX_TARGETID) || (lun >= ARCMSR_MAX_TARGETLUN)) {
		return (DDI_FAILURE);
	}


	if (ndi_dev_is_persistent_node(target_dev_info) == 0) {
		/*
		 * If no persistent node exists, we don't allow a .conf
		 * node to be created.
		 */
		if (arcmsr_find_child(acb, target, lun) != NULL) {
			if ((ndi_merge_node(target_dev_info,
			    arcmsr_name_node) != DDI_SUCCESS)) {
				return (DDI_SUCCESS);
			}
		}
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Function: arcmsr_tran_getcap(9E)
 * Description: Get the named capability and return its value.
 * Return Values: current value of capability, if defined
 *		  -1 if capability is not defined
 * ------------------------------------------------------
 * Common Capability Strings Array
 * ------------------------------------------------------
 *	#define	SCSI_CAP_DMA_MAX		0
 *	#define	SCSI_CAP_MSG_OUT		1
 *	#define	SCSI_CAP_DISCONNECT		2
 *	#define	SCSI_CAP_SYNCHRONOUS		3
 *	#define	SCSI_CAP_WIDE_XFER		4
 *	#define	SCSI_CAP_PARITY			5
 *	#define	SCSI_CAP_INITIATOR_ID		6
 *	#define	SCSI_CAP_UNTAGGED_QING		7
 *	#define	SCSI_CAP_TAGGED_QING		8
 *	#define	SCSI_CAP_ARQ			9
 *	#define	SCSI_CAP_LINKED_CMDS		10 a
 *	#define	SCSI_CAP_SECTOR_SIZE		11 b
 *	#define	SCSI_CAP_TOTAL_SECTORS		12 c
 *	#define	SCSI_CAP_GEOMETRY		13 d
 *	#define	SCSI_CAP_RESET_NOTIFICATION	14 e
 *	#define	SCSI_CAP_QFULL_RETRIES		15 f
 *	#define	SCSI_CAP_QFULL_RETRY_INTERVAL	16 10
 *	#define	SCSI_CAP_SCSI_VERSION		17 11
 *	#define	SCSI_CAP_INTERCONNECT_TYPE	18 12
 *	#define	SCSI_CAP_LUN_RESET		19 13
 */
static int
arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int capability = 0;
	struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;

	if (cap == NULL || whom == 0) {
		return (DDI_FAILURE);
	}

	mutex_enter(&acb->acb_mutex);
	if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
		mutex_exit(&acb->acb_mutex);
		return (-1);
	}
	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_TAGGED_QING:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
		capability = 1;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		capability = ARCMSR_DEV_SECTOR_SIZE;
		break;
	case SCSI_CAP_DMA_MAX:
		/* Limit to 16MB max transfer */
		capability = ARCMSR_MAX_XFER_LEN;
		break;
	case SCSI_CAP_INITIATOR_ID:
		capability = ARCMSR_SCSI_INITIATOR_ID;
		break;
	case SCSI_CAP_GEOMETRY:
		/* heads (255) in the upper 16 bits, 63 sectors per track */
		capability = (255 << 16) | 63;
		break;
	default:
		capability = -1;
		break;
	}
	mutex_exit(&acb->acb_mutex);
	return (capability);
}

/*
 * Function: arcmsr_tran_setcap(9E)
 * Description: Set the specific capability.
 * Return Values: 1 - capability exists and can be set to new value
 *		  0 - capability could not be set to new value
 *		 -1 - no such capability
 */
static int
arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	_NOTE(ARGUNUSED(value))

	int supported = 0;
	struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;

	if (cap == NULL || whom == 0) {
		return (-1);
	}

	mutex_enter(&acb->acb_mutex);
	if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
		mutex_exit(&acb->acb_mutex);
		return (-1);
	}
	switch (supported = scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_ARQ:			/* 9 auto request sense */
	case SCSI_CAP_UNTAGGED_QING:		/* 7 */
	case SCSI_CAP_TAGGED_QING:		/* 8 */
		/* these are always on, and cannot be turned off */
		supported = (value == 1) ? 1 : 0;
		break;
	case SCSI_CAP_TOTAL_SECTORS:		/* c */
		supported = 1;
		break;
	case SCSI_CAP_DISCONNECT:		/* 2 */
	case SCSI_CAP_WIDE_XFER:		/* 4 */
	case SCSI_CAP_INITIATOR_ID:		/* 6 */
	case SCSI_CAP_DMA_MAX:			/* 0 */
	case SCSI_CAP_MSG_OUT:			/* 1 */
	case SCSI_CAP_PARITY:			/* 5 */
	case SCSI_CAP_LINKED_CMDS:		/* a */
	case SCSI_CAP_RESET_NOTIFICATION:	/* e */
	case SCSI_CAP_SECTOR_SIZE:		/* b */
		/* these are not settable */
		supported = 0;
		break;
	default:
		supported = -1;
		break;
	}
	mutex_exit(&acb->acb_mutex);
	return (supported);
}


/*
 * Function: arcmsr_tran_init_pkt
 * Return Values: pointer to scsi_pkt, or NULL
 * Description: simultaneously allocate both a scsi_pkt(9S) structure and
 *		DMA resources for that pkt.
 *		Called by kernel on behalf of a target driver
 *		calling scsi_init_pkt(9F).
 *		Refer to tran_init_pkt(9E) man page
 * Context: Can be called from different kernel process threads.
 *	    Can be called by interrupt thread.
 * Allocates SCSI packet and DMA resources
 */
static struct scsi_pkt *
arcmsr_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	struct CCB *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct ACB *acb;
	int old_pkt_flag;

	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;

	if (acb->acb_flags & ACB_F_BUS_RESET) {
		return (NULL);
	}
	if (pkt == NULL) {
		/* get free CCB */
		(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
		    DDI_DMA_SYNC_FORKERNEL);
		ccb = arcmsr_get_freeccb(acb);
		if (ccb == (struct CCB *)NULL) {
			return (NULL);
		}

		if (statuslen < sizeof (struct scsi_arq_status)) {
			statuslen = sizeof (struct scsi_arq_status);
		}
		pkt = scsi_hba_pkt_alloc(acb->dev_info, ap, cmdlen,
		    statuslen, tgtlen, sizeof (void *), callback, arg);
		if (pkt == NULL) {
			arcmsr_warn(acb, "scsi pkt allocation failed");
			arcmsr_free_ccb(ccb);
			return (NULL);
		}
		/* Initialize CCB */
		ccb->pkt = pkt;
		ccb->pkt_dma_handle = NULL;
		/* record how many sg are needed to xfer on this pkt */
		ccb->pkt_ncookies = 0;
		/* record how many sg we got from this window */
		ccb->pkt_cookie = 0;
		/* record how many windows have partial dma map set */
		ccb->pkt_nwin = 0;
		/* record current sg window position */
		ccb->pkt_curwin = 0;
		ccb->pkt_dma_len = 0;
		ccb->pkt_dma_offset = 0;
		ccb->resid_dmacookie.dmac_size = 0;

		/*
		 * we still need this pointer, since we may want to fake
		 * some information in tran_start
		 */
		ccb->bp = bp;

		/* Initialize arcmsr_cdb */
		arcmsr_cdb = &ccb->arcmsr_cdb;
		bzero(arcmsr_cdb, sizeof (struct ARCMSR_CDB));
		arcmsr_cdb->Bus = 0;
		arcmsr_cdb->Function = 1;
		arcmsr_cdb->LUN = ap->a_lun;
		arcmsr_cdb->TargetID = ap->a_target;
		arcmsr_cdb->CdbLength = (uint8_t)cmdlen;
		arcmsr_cdb->Context = (uintptr_t)arcmsr_cdb;

		/* Fill in the rest of the structure */
		pkt->pkt_ha_private = ccb;
		pkt->pkt_address = *ap;
		pkt->pkt_comp = NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		old_pkt_flag = 0;
	} else {
		ccb = pkt->pkt_ha_private;
		if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
			if (!(ccb->ccb_state & ARCMSR_CCB_BACK)) {
				return (NULL);
			}
		}

		/*
		 * you cannot update CdbLength with cmdlen here, it would
		 * cause a data compare error
		 */
		ccb->ccb_state = ARCMSR_CCB_UNBUILD;
		old_pkt_flag = 1;
	}

	/* Second step : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		/*
		 * The system may have a lot of data to transfer, anywhere
		 * from 20 bytes to 819200 bytes, split across several
		 * READ or WRITE SCSI commands.  arcmsr_dma_alloc() obtains
		 * pkt_dma_handle (non-NULL) for the first DMA window;
		 * arcmsr_dma_move() then advances through the remaining
		 * windows repeatedly, reusing the same CCB until the whole
		 * transfer has completed.  After arcmsr_tran_init_pkt()
		 * returns, the kernel uses pkt_resid and b_bcount to decide
		 * what data length the SCSI CDB of the following
		 * arcmsr_tran_start() should describe.
		 *
		 * Each transfer should be aligned on a 512 byte boundary.
		 */
		if (ccb->pkt_dma_handle == NULL) {
			if (arcmsr_dma_alloc(acb, pkt, bp, flags, callback) ==
			    DDI_FAILURE) {
				/*
				 * the HBA driver is unable to allocate DMA
				 * resources, it must free the allocated
				 * scsi_pkt(9S) before returning
				 */
				arcmsr_warn(acb, "dma allocation failure");
				if (old_pkt_flag == 0) {
					arcmsr_warn(acb, "dma "
					    "allocation failed to free "
					    "scsi hba pkt");
					arcmsr_free_ccb(ccb);
					scsi_hba_pkt_free(ap, pkt);
				}
				return (NULL);
			}
		} else {
			/* DMA resources to next DMA window, for old pkt */
			if (arcmsr_dma_move(acb, pkt, bp) == DDI_FAILURE) {
				arcmsr_warn(acb, "dma move failed");
				return (NULL);
			}
		}
	} else {
		pkt->pkt_resid = 0;
	}
	return (pkt);
}

/*
 * Function: arcmsr_tran_start(9E)
 * Description: Transport the command in pktp to the target device.
 *		The command is not finished when this returns, only
 *		sent to the target; arcmsr_intr_handler will call
 *		scsi_hba_pkt_comp(pktp) when the target device is done.
 *
 * Input: struct scsi_address *ap, struct scsi_pkt *pktp
 * Output: TRAN_ACCEPT if pkt is OK and the driver is not busy
 *	   TRAN_BUSY if the driver is busy
 *	   TRAN_BADPKT if pkt is invalid
 */
static int
arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct ACB *acb;
	struct CCB *ccb;
	int target = ap->a_target;
	int lun = ap->a_lun;

	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
	ccb = pkt->pkt_ha_private;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	if ((ccb->ccb_flags & CCB_FLAG_DMAVALID) &&
	    (ccb->ccb_flags & DDI_DMA_CONSISTENT))
		(void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

	if (ccb->ccb_state == ARCMSR_CCB_UNBUILD)
		arcmsr_build_ccb(ccb);

	if (acb->acb_flags & ACB_F_BUS_RESET) {
		pkt->pkt_reason = CMD_RESET;
		pkt->pkt_statistics |= STAT_BUS_RESET;
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS);
		if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
		    (pkt->pkt_state & STATE_XFERRED_DATA))
			(void) ddi_dma_sync(ccb->pkt_dma_handle,
			    0, 0, DDI_DMA_SYNC_FORCPU);

		scsi_hba_pkt_comp(pkt);
		return (TRAN_ACCEPT);
	}

	/* IMPORTANT: Target 16 is a virtual device for iop message transfer */
	if (target == 16) {

		struct buf *bp = ccb->bp;
		uint8_t scsicmd = pkt->pkt_cdbp[0];

		switch (scsicmd) {
		case SCMD_INQUIRY: {
			if (lun != 0) {
				ccb->pkt->pkt_reason = CMD_TIMEOUT;
				ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
				arcmsr_ccb_complete(ccb, 0);
				return (TRAN_ACCEPT);
			}

			if (bp && bp->b_un.b_addr && bp->b_bcount) {
				uint8_t inqdata[36];

				/* EVPD and page code are not supported */
				if (pkt->pkt_cdbp[1] || pkt->pkt_cdbp[2]) {
					inqdata[1] = 0xFF;
					inqdata[2] = 0x00;
				} else {
					/* Periph Qualifier & Periph Dev Type */
					inqdata[0] = DTYPE_PROCESSOR;
					/* rem media bit & Dev Type Modifier */
					inqdata[1] = 0;
					/* ISO, ECMA, & ANSI versions */
					inqdata[2] = 0;
					inqdata[3] = 0;
					/* length of additional data */
					inqdata[4] = 31;
					/* Vendor Identification */
					bcopy("Areca   ", &inqdata[8], VIDLEN);
					/* Product Identification */
					bcopy("RAID controller ", &inqdata[16],
					    PIDLEN);
					/* Product Revision */
					bcopy("R001", &inqdata[32], REVLEN);
					if (bp->b_flags & (B_PHYS | B_PAGEIO))
						bp_mapin(bp);

					(void) memcpy(bp->b_un.b_addr,
					    inqdata, sizeof (inqdata));
				}
				ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			arcmsr_ccb_complete(ccb, 0);
			return (TRAN_ACCEPT);
		}
		case SCMD_WRITE_BUFFER:
		case SCMD_READ_BUFFER: {
			if (arcmsr_iop_message_xfer(acb, pkt)) {
				/* flag an error so the command is retried */
				ccb->pkt->pkt_reason = CMD_TRAN_ERR;
				ccb->pkt->pkt_statistics |= STAT_TERMINATED;
			}
			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
			arcmsr_ccb_complete(ccb, 0);
			return (TRAN_ACCEPT);
		}
		default:
			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
			arcmsr_ccb_complete(ccb, 0);
			return (TRAN_ACCEPT);
		}
	}

	if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
		uint8_t block_cmd;

		block_cmd = pkt->pkt_cdbp[0] & 0x0f;
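		/*
		 * The low nibble 0x08/0x0a matches the READ/WRITE opcodes
		 * across CDB groups (SCMD_READ 0x08, SCMD_WRITE 0x0a,
		 * SCMD_READ_G1 0x28, SCMD_WRITE_G1 0x2a, ...), so only
		 * media access commands are timed out against a device
		 * that has gone away.
		 */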
		if (block_cmd == 0x08 || block_cmd == 0x0a) {
			pkt->pkt_reason = CMD_TIMEOUT;
			pkt->pkt_statistics |= STAT_TIMEOUT;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD | STATE_GOT_STATUS);
			if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
			    (pkt->pkt_state & STATE_XFERRED_DATA)) {
				(void) ddi_dma_sync(ccb->pkt_dma_handle,
				    ccb->pkt_dma_offset,
				    ccb->pkt_dma_len, DDI_DMA_SYNC_FORCPU);
			}
			scsi_hba_pkt_comp(pkt);
			return (TRAN_ACCEPT);
		}
	}
	mutex_enter(&acb->postq_mutex);
	if (acb->ccboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
		ccb->ccb_state = ARCMSR_CCB_RETRY;
		mutex_exit(&acb->postq_mutex);
		return (TRAN_BUSY);
	} else if (arcmsr_post_ccb(acb, ccb) == DDI_FAILURE) {
		arcmsr_warn(acb, "post ccb failure, ccboutstandingcount = %d",
		    acb->ccboutstandingcount);
		mutex_exit(&acb->postq_mutex);
		return (TRAN_FATAL_ERROR);
	}
	mutex_exit(&acb->postq_mutex);
	return (TRAN_ACCEPT);
}

/*
 * Function name: arcmsr_tran_destroy_pkt
 * Return Values: none
 * Description: Called by kernel on behalf of a target driver
 *		calling scsi_destroy_pkt(9F).
 *		Refer to tran_destroy_pkt(9E) man page
 * Context: Can be called from different kernel process threads.
 *	    Can be called by interrupt thread.
 */
static void
arcmsr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct CCB *ccb = pkt->pkt_ha_private;
	ddi_dma_handle_t pkt_dma_handle;

	if (ccb == NULL || ccb->pkt != pkt) {
		return;
	}
	pkt_dma_handle = ccb->pkt_dma_handle;
	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
		ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
		if (pkt_dma_handle) {
			(void) ddi_dma_unbind_handle(ccb->pkt_dma_handle);
		}
	}
	if (pkt_dma_handle) {
		(void) ddi_dma_free_handle(&pkt_dma_handle);
	}
	pkt->pkt_ha_private = NULL;
	if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
		if (ccb->ccb_state & ARCMSR_CCB_BACK) {
			arcmsr_free_ccb(ccb);
		} else {
			ccb->ccb_state |= ARCMSR_CCB_WAIT4_FREE;
		}
	} else {
		arcmsr_free_ccb(ccb);
	}
	scsi_hba_pkt_free(ap, pkt);
}

/*
 * Function name: arcmsr_tran_dmafree()
 * Return Values: none
 * Description: free dvma resources
 * Context: Can be called from different kernel process threads.
 *	    Can be called by interrupt thread.
 */
static void
arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct CCB *ccb = pkt->pkt_ha_private;

	if ((ccb == NULL) || (ccb->pkt != pkt)) {
		return;
	}
	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
		ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
		if (ddi_dma_unbind_handle(ccb->pkt_dma_handle) != DDI_SUCCESS) {
			arcmsr_warn(ccb->acb, "ddi_dma_unbind_handle() failed "
			    "(target %d lun %d)", ap->a_target, ap->a_lun);
		}
		ddi_dma_free_handle(&ccb->pkt_dma_handle);
		ccb->pkt_dma_handle = NULL;
	}
}

/*
 * Function name: arcmsr_tran_sync_pkt()
 * Return Values: none
 * Description: sync dma
 * Context: Can be called from different kernel process threads.
 *	    Can be called by interrupt thread.
 */
static void
arcmsr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct CCB *ccb;

	ccb = pkt->pkt_ha_private;
	if ((ccb == NULL) || (ccb->pkt != pkt)) {
		return;
	}
	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
		if (ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
		    (ccb->ccb_flags & CCB_FLAG_DMAWRITE) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
		    DDI_SUCCESS) {
			arcmsr_warn(ccb->acb,
			    "sync pkt failed for target %d lun %d",
			    ap->a_target, ap->a_lun);
		}
	}
}


/*
 * Function: arcmsr_tran_abort(9E)
 *		SCSA interface routine to abort pkt(s) in progress.
 *		Aborts the pkt specified. If NULL pkt, aborts ALL pkts.
 * Output:	Return 1 if success
 *		Return 0 if failure
 */
static int
arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *abortpkt)
{
	struct ACB *acb;
	int return_code;

	acb = ap->a_hba_tran->tran_hba_private;

	while (acb->ccboutstandingcount != 0) {
		drv_usecwait(10000);
	}

	mutex_enter(&acb->isr_mutex);
	return_code = arcmsr_seek_cmd2abort(acb, abortpkt);
	mutex_exit(&acb->isr_mutex);

	if (return_code != DDI_SUCCESS) {
		arcmsr_warn(acb, "abort command failed for target %d lun %d",
		    ap->a_target, ap->a_lun);
		return (0);
	}
	return (1);
}

/*
 * Function: arcmsr_tran_reset(9E)
 *		SCSA interface routine to perform scsi resets on either
 *		a specified target or the bus (default).
 * Output:	Return 1 if success
 *		Return 0 if failure
 */
static int
arcmsr_tran_reset(struct scsi_address *ap, int level)
{
	struct ACB *acb;
	int return_code = 1;
	int target = ap->a_target;
	int lun = ap->a_lun;

	/* Are we in the middle of dumping core? */
	if (ddi_in_panic())
		return (return_code);

	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
	mutex_enter(&acb->isr_mutex);
	switch (level) {
	case RESET_ALL:		/* 0 */
		acb->num_resets++;
		acb->acb_flags |= ACB_F_BUS_RESET;
		if (acb->timeout_count) {
			if (arcmsr_iop_reset(acb) != 0) {
				arcmsr_handle_iop_bus_hold(acb);
				acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
			}
		}
		acb->acb_flags &= ~ACB_F_BUS_RESET;
		break;
	case RESET_TARGET:	/* 1 */
		if (acb->devstate[target][lun] == ARECA_RAID_GONE)
			return_code = 0;
		break;
	case RESET_BUS:		/* 2 */
		return_code = 0;
		break;
	case RESET_LUN:		/* 3 */
		return_code = 0;
		break;
	default:
		return_code = 0;
	}
	mutex_exit(&acb->isr_mutex);
	return (return_code);
}

static int
arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	struct ACB *acb;
	int circ = 0;
	int rval = NDI_FAILURE;
	int tgt, lun;

	if ((acb = ddi_get_soft_state(arcmsr_soft_state,
	    ddi_get_instance(parent))) == NULL)
		return (NDI_FAILURE);

	ndi_devi_enter(parent, &circ);
	switch (op) {
	case BUS_CONFIG_ONE:
		if (arcmsr_parse_devname(arg, &tgt, &lun) != 0) {
			rval = NDI_FAILURE;
			break;
		}
		if (acb->device_map[tgt] & 1 << lun) {
			acb->devstate[tgt][lun] = ARECA_RAID_GOOD;
			rval = arcmsr_config_lun(acb, tgt, lun, childp);
		}
		break;

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		for (tgt = 0; tgt < ARCMSR_MAX_TARGETID; tgt++)
			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++)
				if (acb->device_map[tgt] & 1 << lun) {
					acb->devstate[tgt][lun] =
					    ARECA_RAID_GOOD;
					(void) arcmsr_config_lun(acb, tgt,
					    lun, NULL);
				}

		rval = NDI_SUCCESS;
		break;
	}
	if (rval == NDI_SUCCESS)
		rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
	ndi_devi_exit(parent, circ);
	return (rval);
}

/*
 * Function name: arcmsr_dma_alloc
 * Return Values: 0 if successful, -1 if failure
 * Description: allocate DMA resources
 * Context: Can only be called from arcmsr_tran_init_pkt()
 *	register struct scsi_address *ap = &((pkt)->pkt_address);
 */
static int
arcmsr_dma_alloc(struct ACB *acb, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	struct CCB *ccb = pkt->pkt_ha_private;
	int alloc_result, map_method, dma_flags;
	int resid = 0;
	int total_ccb_xferlen = 0;
	int (*cb)(caddr_t);
	uint8_t i;

	/*
	 * at this point the PKT SCSI CDB is empty, and dma xfer length
	 * is bp->b_bcount
	 */

	if (bp->b_flags & B_READ) {
		ccb->ccb_flags &= ~CCB_FLAG_DMAWRITE;
		dma_flags = DDI_DMA_READ;
	} else {
		ccb->ccb_flags |= CCB_FLAG_DMAWRITE;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		ccb->ccb_flags |= CCB_FLAG_DMACONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}
	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;
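	/*
	 * NULL_FUNC from the target driver means we must not block
	 * waiting for DMA resources; any other callback lets us sleep.
	 */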
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	alloc_result = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_dma_attr,
	    cb, 0, &ccb->pkt_dma_handle);
	if (alloc_result != DDI_SUCCESS) {
		arcmsr_warn(acb, "dma allocate failed (%x)", alloc_result);
		return (DDI_FAILURE);
	}

	map_method = ddi_dma_buf_bind_handle(ccb->pkt_dma_handle,
	    bp, dma_flags, cb, 0,
	    &ccb->pkt_dmacookies[0],	/* SG List pointer */
	    &ccb->pkt_ncookies);	/* number of sgl cookies */

	switch (map_method) {
	case DDI_DMA_PARTIAL_MAP:
		/*
		 * DDI_DMA_PARTIAL_MAP can show up when main memory is
		 * larger than 4G.
		 *
		 * We've already set DDI_DMA_PARTIAL in dma_flags,
		 * so if it's now missing, there's something screwy
		 * happening. We plow on....
		 */

		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			arcmsr_warn(acb,
			    "dma partial mapping lost ...impossible case!");
		}
		if (ddi_dma_numwin(ccb->pkt_dma_handle, &ccb->pkt_nwin) ==
		    DDI_FAILURE) {
			arcmsr_warn(acb, "ddi_dma_numwin() failed");
		}

		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
		    &ccb->pkt_dmacookies[0], &ccb->pkt_ncookies) ==
		    DDI_FAILURE) {
			arcmsr_warn(acb, "ddi_dma_getwin failed");
		}

		i = 0;
		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
		for (;;) {
			i++;
			if ((i == ARCMSR_MAX_SG_ENTRIES) ||
			    (i == ccb->pkt_ncookies) ||
			    (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
				break;
			}
			/*
			 * next cookie will be retrieved from
			 * ccb->pkt_dmacookies[i]
			 */
			ddi_dma_nextcookie(ccb->pkt_dma_handle,
			    &ccb->pkt_dmacookies[i]);
			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
		}
		ccb->pkt_cookie = i;
		ccb->arcmsr_cdb.sgcount = i;
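		/*
		 * Trim the transfer to a multiple of 512 bytes; the
		 * remainder is parked in resid_dmacookie and becomes the
		 * first cookie of the next arcmsr_dma_move() pass, so
		 * every posted transfer stays sector aligned.
		 */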
		if (total_ccb_xferlen > 512) {
			resid = total_ccb_xferlen % 512;
			if (resid != 0) {
				i--;
				total_ccb_xferlen -= resid;
				/* modify last sg length */
				ccb->pkt_dmacookies[i].dmac_size =
				    ccb->pkt_dmacookies[i].dmac_size - resid;
				ccb->resid_dmacookie.dmac_size = resid;
				ccb->resid_dmacookie.dmac_laddress =
				    ccb->pkt_dmacookies[i].dmac_laddress +
				    ccb->pkt_dmacookies[i].dmac_size;
			}
		}
		ccb->total_dmac_size = total_ccb_xferlen;
		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;

		return (DDI_SUCCESS);

	case DDI_DMA_MAPPED:
		ccb->pkt_nwin = 1; /* all mapped, so only one window */
		ccb->pkt_dma_len = 0;
		ccb->pkt_dma_offset = 0;
		i = 0;
		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
		for (;;) {
			i++;
			if ((i == ARCMSR_MAX_SG_ENTRIES) ||
			    (i == ccb->pkt_ncookies) ||
			    (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
				break;
			}
			/*
			 * next cookie will be retrieved from
			 * ccb->pkt_dmacookies[i]
			 */
			ddi_dma_nextcookie(ccb->pkt_dma_handle,
			    &ccb->pkt_dmacookies[i]);
			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
		}
		ccb->pkt_cookie = i;
		ccb->arcmsr_cdb.sgcount = i;
		if (total_ccb_xferlen > 512) {
			resid = total_ccb_xferlen % 512;
			if (resid != 0) {
				i--;
				total_ccb_xferlen -= resid;
				/* modify last sg length */
				ccb->pkt_dmacookies[i].dmac_size =
				    ccb->pkt_dmacookies[i].dmac_size - resid;
				ccb->resid_dmacookie.dmac_size = resid;
				ccb->resid_dmacookie.dmac_laddress =
				    ccb->pkt_dmacookies[i].dmac_laddress +
				    ccb->pkt_dmacookies[i].dmac_size;
			}
		}
		ccb->total_dmac_size = total_ccb_xferlen;
		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
		return (DDI_SUCCESS);

	case DDI_DMA_NORESOURCES:
		arcmsr_warn(acb, "dma map got 'no resources'");
		bioerror(bp, ENOMEM);
		break;

	case DDI_DMA_NOMAPPING:
		arcmsr_warn(acb, "dma map got 'no mapping'");
		bioerror(bp, EFAULT);
		break;

	case DDI_DMA_TOOBIG:
		arcmsr_warn(acb, "dma map got 'too big'");
		bioerror(bp, EINVAL);
		break;

	case DDI_DMA_INUSE:
		arcmsr_warn(acb, "dma map got 'in use' "
		    "(should not happen)");
		break;
	default:
		arcmsr_warn(acb, "dma map failed (0x%x)", map_method);
		break;
	}

	ddi_dma_free_handle(&ccb->pkt_dma_handle);
	ccb->pkt_dma_handle = NULL;
	ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
	return (DDI_FAILURE);
}


/*
 * Function name: arcmsr_dma_move
 * Return Values: 0 if successful, -1 if failure
 * Description: move DMA resources to next DMA window
 * Context: Can only be called from arcmsr_tran_init_pkt()
 */
static int
arcmsr_dma_move(struct ACB *acb, struct scsi_pkt *pkt, struct buf *bp)
{
	struct CCB *ccb = pkt->pkt_ha_private;
	uint8_t i = 0;
	int resid = 0;
	int total_ccb_xferlen = 0;

	if (ccb->resid_dmacookie.dmac_size != 0) {
		total_ccb_xferlen += ccb->resid_dmacookie.dmac_size;
		ccb->pkt_dmacookies[i].dmac_size =
		    ccb->resid_dmacookie.dmac_size;
		ccb->pkt_dmacookies[i].dmac_laddress =
		    ccb->resid_dmacookie.dmac_laddress;
		i++;
		ccb->resid_dmacookie.dmac_size = 0;
	}
	/*
	 * If there are no more cookies remaining in this window,
	 * move to the next window.
	 */
	if (ccb->pkt_cookie == ccb->pkt_ncookies) {
		/*
		 * only partial (DDI_DMA_PARTIAL_MAP) mappings arrive here
		 */
		if ((ccb->pkt_curwin == ccb->pkt_nwin) &&
		    (ccb->pkt_nwin == 1)) {
			return (DDI_SUCCESS);
		}

		/* At last window, cannot move */
		if (++ccb->pkt_curwin >= ccb->pkt_nwin) {
			arcmsr_warn(acb, "dma partial set, numwin exceeded");
			return (DDI_FAILURE);
		}
		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
		    &ccb->pkt_dmacookies[i], &ccb->pkt_ncookies) ==
		    DDI_FAILURE) {
			arcmsr_warn(acb, "ddi_dma_getwin failed");
			return (DDI_FAILURE);
		}
		/* reset cookie pointer */
		ccb->pkt_cookie = 0;
	} else {
		/*
		 * Only fully-mapped (DDI_DMA_MAPPED) transfers arrive here.
		 * We still have more cookies in this window; get the next
		 * one and record it in the ccb->pkt_dmacookies array.
		 */
		ddi_dma_nextcookie(ccb->pkt_dma_handle,
		    &ccb->pkt_dmacookies[i]);
	}

	/* Get remaining cookies in this window, up to our maximum */
	total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;

	/* retrieve and store the remaining cookies of this window */
	for (;;) {
		i++;
		/* handled cookies count level indicator */
		ccb->pkt_cookie++;
		if ((i == ARCMSR_MAX_SG_ENTRIES) ||
		    (ccb->pkt_cookie == ccb->pkt_ncookies) ||
		    (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
			break;
		}
		ddi_dma_nextcookie(ccb->pkt_dma_handle,
		    &ccb->pkt_dmacookies[i]);
		total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
	}

	ccb->arcmsr_cdb.sgcount = i;
	if (total_ccb_xferlen > 512) {
		resid = total_ccb_xferlen % 512;
		if (resid != 0) {
			i--;
			total_ccb_xferlen -= resid;
			/* modify last sg length */
			ccb->pkt_dmacookies[i].dmac_size =
			    ccb->pkt_dmacookies[i].dmac_size - resid;
			ccb->resid_dmacookie.dmac_size = resid;
			ccb->resid_dmacookie.dmac_laddress =
			    ccb->pkt_dmacookies[i].dmac_laddress +
			    ccb->pkt_dmacookies[i].dmac_size;
		}
	}
	ccb->total_dmac_size += total_ccb_xferlen;
	pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;

	return (DDI_SUCCESS);
}
1752
1753
1754 /*ARGSUSED*/
1755 static void
arcmsr_build_ccb(struct CCB * ccb)1756 arcmsr_build_ccb(struct CCB *ccb)
1757 {
1758 struct scsi_pkt *pkt = ccb->pkt;
1759 struct ARCMSR_CDB *arcmsr_cdb;
1760 char *psge;
1761 uint32_t address_lo, address_hi;
1762 int arccdbsize = 0x30;
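	/*
	 * 0x30 bytes presumably covers the fixed ARCMSR_CDB header that
	 * precedes the scatter/gather union (psge points at
	 * arcmsr_cdb->sgu below); each SG entry appended grows
	 * arccdbsize accordingly.
	 */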
1763 uint8_t sgcount;
1764
1765 arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1766 psge = (char *)&arcmsr_cdb->sgu;
1767
1768 bcopy((caddr_t)pkt->pkt_cdbp, arcmsr_cdb->Cdb, arcmsr_cdb->CdbLength);
1769 sgcount = ccb->arcmsr_cdb.sgcount;
1770
1771 if (sgcount != 0) {
1772 int length, i;
1773 int cdb_sgcount = 0;
1774 int total_xfer_length = 0;
1775
1776 /* map stor port SG list to our iop SG List. */
1777 for (i = 0; i < sgcount; i++) {
1778 /* Get physaddr of the current data pointer */
1779 length = ccb->pkt_dmacookies[i].dmac_size;
1780 total_xfer_length += length;
1781 address_lo =
1782 dma_addr_lo32(ccb->pkt_dmacookies[i].dmac_laddress);
1783 address_hi =
1784 dma_addr_hi32(ccb->pkt_dmacookies[i].dmac_laddress);
1785
1786 if (address_hi == 0) {
1787 struct SG32ENTRY *dma_sg;
1788
1789 dma_sg = (struct SG32ENTRY *)(intptr_t)psge;
1790 dma_sg->address = address_lo;
1791 dma_sg->length = length;
1792 psge += sizeof (struct SG32ENTRY);
1793 arccdbsize += sizeof (struct SG32ENTRY);
1794 } else {
1795 struct SG64ENTRY *dma_sg;
1796
1797 dma_sg = (struct SG64ENTRY *)(intptr_t)psge;
1798 dma_sg->addresshigh = address_hi;
1799 dma_sg->address = address_lo;
1800 dma_sg->length = length | IS_SG64_ADDR;
1801 psge += sizeof (struct SG64ENTRY);
1802 arccdbsize += sizeof (struct SG64ENTRY);
1803 }
1804 cdb_sgcount++;
1805 }
1806 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
1807 arcmsr_cdb->DataLength = total_xfer_length;
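		/*
		 * CDBs that have grown past 256 bytes are flagged so
		 * that arcmsr_post_ccb() posts them with
		 * ARCMSR_CCBPOST_FLAG_SGL_BSIZE, apparently telling the
		 * IOP to fetch the larger SG list.
		 */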
1808 if (arccdbsize > 256) {
1809 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1810 }
1811 } else {
1812 arcmsr_cdb->DataLength = 0;
1813 }
1814
1815 if (ccb->ccb_flags & CCB_FLAG_DMAWRITE)
1816 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
1817 ccb->arc_cdb_size = arccdbsize;
1818 }
1819
1820 /*
1821  * arcmsr_post_ccb - post a built CCB to an adapter's request queue.
1822  *
1823  * acb: adapter control block of the adapter to post to
1824  * ccb: the CCB, with its ARCMSR_CDB already built, to post
1825  *
1826  * This routine writes a "postcard" (the CCB's physical address
1827  * pattern, plus flags) to the request post FIFO or post queue of
1828  * the specific ARC adapter.
1829  */
1830 static int
1831 arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb)
1832 {
1833 uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
1834 struct scsi_pkt *pkt = ccb->pkt;
1835 struct ARCMSR_CDB *arcmsr_cdb;
1836 uint_t pkt_flags = pkt->pkt_flags;
1837
1838 arcmsr_cdb = &ccb->arcmsr_cdb;
1839
1840 /* TODO: Use correct offset and size for syncing? */
1841 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0, DDI_DMA_SYNC_FORDEV) ==
1842 DDI_FAILURE)
1843 return (DDI_FAILURE);
1844
1845 atomic_inc_32(&acb->ccboutstandingcount);
1846 ccb->ccb_time = (time_t)(ddi_get_time() + pkt->pkt_time);
1847
1848 ccb->ccb_state = ARCMSR_CCB_START;
1849 switch (acb->adapter_type) {
1850 case ACB_ADAPTER_TYPE_A:
1851 {
1852 struct HBA_msgUnit *phbamu;
1853
1854 phbamu = (struct HBA_msgUnit *)acb->pmu;
1855 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1856 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1857 &phbamu->inbound_queueport,
1858 cdb_phyaddr_pattern |
1859 ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1860 } else {
1861 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1862 &phbamu->inbound_queueport, cdb_phyaddr_pattern);
1863 }
1864 if (pkt_flags & FLAG_NOINTR)
1865 arcmsr_polling_hba_ccbdone(acb, ccb);
1866 break;
1867 }
1868
1869 case ACB_ADAPTER_TYPE_B:
1870 {
1871 struct HBB_msgUnit *phbbmu;
1872 int ending_index, index;
1873
1874 phbbmu = (struct HBB_msgUnit *)acb->pmu;
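		/*
		 * The HBB post queue is a software ring shared with the
		 * IOP: zero the slot after this one as an end marker,
		 * store the CDB pattern in the current slot, advance
		 * postq_index modulo ARCMSR_MAX_HBB_POSTQUEUE, then
		 * ring the drv2iop doorbell.
		 */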
1875 index = phbbmu->postq_index;
1876 ending_index = ((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
1877 phbbmu->post_qbuffer[ending_index] = 0;
1878 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1879 phbbmu->post_qbuffer[index] =
1880 (cdb_phyaddr_pattern|ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1881 } else {
1882 phbbmu->post_qbuffer[index] = cdb_phyaddr_pattern;
1883 }
1884 index++;
1885 		/* wrap the index to 0 past the last slot */
1886 index %= ARCMSR_MAX_HBB_POSTQUEUE;
1887 phbbmu->postq_index = index;
1888 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1889 &phbbmu->hbb_doorbell->drv2iop_doorbell,
1890 ARCMSR_DRV2IOP_CDB_POSTED);
1891
1892 if (pkt_flags & FLAG_NOINTR)
1893 arcmsr_polling_hbb_ccbdone(acb, ccb);
1894 break;
1895 }
1896
1897 case ACB_ADAPTER_TYPE_C:
1898 {
1899 struct HBC_msgUnit *phbcmu;
1900 uint32_t ccb_post_stamp, arc_cdb_size;
1901
1902 phbcmu = (struct HBC_msgUnit *)acb->pmu;
1903 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 :
1904 ccb->arc_cdb_size;
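		/*
		 * The low bits of the posted word encode
		 * (arc_cdb_size - 1) / 64, i.e. the CDB length in
		 * 64-byte units (capped at 0x300 bytes above), and bit 0
		 * is set, presumably to tell the IOP a length is encoded.
		 */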
1905 ccb_post_stamp = (cdb_phyaddr_pattern |
1906 ((arc_cdb_size-1) >> 6) |1);
1907 if (acb->cdb_phyaddr_hi32) {
1908 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1909 &phbcmu->inbound_queueport_high,
1910 acb->cdb_phyaddr_hi32);
1911 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1912 &phbcmu->inbound_queueport_low, ccb_post_stamp);
1913 } else {
1914 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1915 &phbcmu->inbound_queueport_low, ccb_post_stamp);
1916 }
1917 if (pkt_flags & FLAG_NOINTR)
1918 arcmsr_polling_hbc_ccbdone(acb, ccb);
1919 break;
1920 }
1921
1922 }
1923 return (DDI_SUCCESS);
1924 }
1925
1926
1927 static void
1928 arcmsr_ccb_complete(struct CCB *ccb, int flag)
1929 {
1930 struct ACB *acb = ccb->acb;
1931 struct scsi_pkt *pkt = ccb->pkt;
1932
1933 if (pkt == NULL) {
1934 return;
1935 }
1936 ccb->ccb_state |= ARCMSR_CCB_DONE;
1937 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1938 STATE_SENT_CMD | STATE_GOT_STATUS);
1939
1940 if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1941 (pkt->pkt_state & STATE_XFERRED_DATA)) {
1942 (void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1943 DDI_DMA_SYNC_FORCPU);
1944 }
1945 /*
1946 * TODO: This represents a potential race condition, and is
1947 * ultimately a poor design decision. Revisit this code
1948 * and solve the mutex ownership issue correctly.
1949 */
1950 if (mutex_owned(&acb->isr_mutex)) {
1951 mutex_exit(&acb->isr_mutex);
1952 scsi_hba_pkt_comp(pkt);
1953 mutex_enter(&acb->isr_mutex);
1954 } else {
1955 scsi_hba_pkt_comp(pkt);
1956 }
1957 if (flag == 1) {
1958 atomic_dec_32(&acb->ccboutstandingcount);
1959 }
1960 }
1961
1962 static void
1963 arcmsr_report_ccb_state(struct ACB *acb, struct CCB *ccb, boolean_t error)
1964 {
1965 int id, lun;
1966
1967 ccb->ccb_state |= ARCMSR_CCB_DONE;
1968 id = ccb->pkt->pkt_address.a_target;
1969 lun = ccb->pkt->pkt_address.a_lun;
1970
1971 if (!error) {
1972 if (acb->devstate[id][lun] == ARECA_RAID_GONE) {
1973 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1974 }
1975 ccb->pkt->pkt_reason = CMD_CMPLT;
1976 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1977 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1978 &ccb->complete_queue_pointer, &acb->ccb_complete_list);
1979
1980 } else {
1981 switch (ccb->arcmsr_cdb.DeviceStatus) {
1982 case ARCMSR_DEV_SELECT_TIMEOUT:
1983 if (acb->devstate[id][lun] == ARECA_RAID_GOOD) {
1984 arcmsr_warn(acb,
1985 "target %d lun %d selection "
1986 "timeout", id, lun);
1987 }
1988 acb->devstate[id][lun] = ARECA_RAID_GONE;
1989 ccb->pkt->pkt_reason = CMD_TIMEOUT; /* CMD_DEV_GONE; */
1990 ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1991 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1992 &ccb->complete_queue_pointer,
1993 &acb->ccb_complete_list);
1994 break;
1995 case ARCMSR_DEV_ABORTED:
1996 case ARCMSR_DEV_INIT_FAIL:
1997 			arcmsr_warn(acb, "isr got ARCMSR_DEV_ABORTED"
1998 			    " or ARCMSR_DEV_INIT_FAIL");
1999 arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
2000 acb->devstate[id][lun] = ARECA_RAID_GONE;
2001 ccb->pkt->pkt_reason = CMD_DEV_GONE;
2002 ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2003 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2004 &ccb->complete_queue_pointer,
2005 &acb->ccb_complete_list);
2006 break;
2007 case SCSISTAT_CHECK_CONDITION:
2008 acb->devstate[id][lun] = ARECA_RAID_GOOD;
2009 arcmsr_report_sense_info(ccb);
2010 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2011 &ccb->complete_queue_pointer,
2012 &acb->ccb_complete_list);
2013 break;
2014 default:
2015 arcmsr_warn(acb,
2016 "target %d lun %d isr received CMD_DONE"
2017 " with unknown DeviceStatus (0x%x)",
2018 id, lun, ccb->arcmsr_cdb.DeviceStatus);
2019 arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
2020 acb->devstate[id][lun] = ARECA_RAID_GONE;
2021 			/* unknown or CRC error; flag the command for retry */
2022 ccb->pkt->pkt_reason = CMD_TRAN_ERR;
2023 ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2024 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2025 &ccb->complete_queue_pointer,
2026 &acb->ccb_complete_list);
2027 break;
2028 }
2029 }
2030 }
2031
2032
2033 static void
2034 arcmsr_drain_donequeue(struct ACB *acb, struct CCB *ccb, boolean_t error)
2035 {
2038 if (ccb->acb != acb) {
2039 return;
2040 }
2041 if (ccb->ccb_state != ARCMSR_CCB_START) {
2042 		switch (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
2043 		case ARCMSR_CCB_TIMEOUT:
2044 		case ARCMSR_CCB_ABORTED:
2045 		case ARCMSR_CCB_RESET:
2046 			if (ccb->ccb_state & ARCMSR_CCB_WAIT4_FREE)
2047 				arcmsr_free_ccb(ccb);
2048 			else
2049 				ccb->ccb_state |= ARCMSR_CCB_BACK;
2050 			return;
2051 		default:
2052 			return;
2053 		}
2068 }
2069 arcmsr_report_ccb_state(acb, ccb, error);
2070 }
2071
2072 static void
2073 arcmsr_report_sense_info(struct CCB *ccb)
2074 {
2075 struct SENSE_DATA *cdb_sensedata;
2076 struct scsi_pkt *pkt = ccb->pkt;
2077 struct scsi_arq_status *arq_status;
2078 union scsi_cdb *cdbp;
2079 uint64_t err_blkno;
2080
2081 cdbp = (void *)pkt->pkt_cdbp;
2082 err_blkno = ARCMSR_GETGXADDR(ccb->arcmsr_cdb.CdbLength, cdbp);
2083
2084 arq_status = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
2085 bzero((caddr_t)arq_status, sizeof (struct scsi_arq_status));
2086 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
2087 arq_status->sts_rqpkt_reason = CMD_CMPLT;
2088 arq_status->sts_rqpkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET |
2089 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS);
2090 arq_status->sts_rqpkt_statistics = 0;
2091 arq_status->sts_rqpkt_resid = 0;
2092
2093 pkt->pkt_reason = CMD_CMPLT;
2094 /* auto rqsense took place */
2095 pkt->pkt_state |= STATE_ARQ_DONE;
2096
2097 cdb_sensedata = (struct SENSE_DATA *)ccb->arcmsr_cdb.SenseData;
2099 	if (err_blkno <= 0xfffffffful) {
2100 		struct scsi_extended_sense *sts_sensedata;
2101 
2102 		sts_sensedata = &arq_status->sts_sensedata;
2103 		sts_sensedata->es_code = cdb_sensedata->ErrorCode;
2104 		/* must eq CLASS_EXTENDED_SENSE (0x07) */
2105 		sts_sensedata->es_class = cdb_sensedata->ErrorClass;
2106 		sts_sensedata->es_valid = cdb_sensedata->Valid;
2107 		sts_sensedata->es_segnum = cdb_sensedata->SegmentNumber;
2108 		sts_sensedata->es_key = cdb_sensedata->SenseKey;
2109 		sts_sensedata->es_ili = cdb_sensedata->IncorrectLength;
2110 		sts_sensedata->es_eom = cdb_sensedata->EndOfMedia;
2111 		sts_sensedata->es_filmk = cdb_sensedata->FileMark;
2112 		sts_sensedata->es_info_1 = (err_blkno >> 24) & 0xFF;
2113 		sts_sensedata->es_info_2 = (err_blkno >> 16) & 0xFF;
2114 		sts_sensedata->es_info_3 = (err_blkno >> 8) & 0xFF;
2115 		sts_sensedata->es_info_4 = err_blkno & 0xFF;
2116 		sts_sensedata->es_add_len =
2117 		    cdb_sensedata->AdditionalSenseLength;
2118 		sts_sensedata->es_cmd_info[0] =
2119 		    cdb_sensedata->CommandSpecificInformation[0];
2120 		sts_sensedata->es_cmd_info[1] =
2121 		    cdb_sensedata->CommandSpecificInformation[1];
2122 		sts_sensedata->es_cmd_info[2] =
2123 		    cdb_sensedata->CommandSpecificInformation[2];
2124 		sts_sensedata->es_cmd_info[3] =
2125 		    cdb_sensedata->CommandSpecificInformation[3];
2126 		sts_sensedata->es_add_code =
2127 		    cdb_sensedata->AdditionalSenseCode;
2128 		sts_sensedata->es_qual_code =
2129 		    cdb_sensedata->AdditionalSenseCodeQualifier;
2130 		sts_sensedata->es_fru_code =
2131 		    cdb_sensedata->FieldReplaceableUnitCode;
2132 	} else { /* 64-bit LBA */
2133 		struct scsi_descr_sense_hdr *dsp;
2134 		struct scsi_information_sense_descr *isd;
2135 
2136 		dsp = (struct scsi_descr_sense_hdr *)
2137 		    &arq_status->sts_sensedata;
2138 		dsp->ds_class = CLASS_EXTENDED_SENSE;
2139 		dsp->ds_code = CODE_FMT_DESCR_CURRENT;
2140 		dsp->ds_key = cdb_sensedata->SenseKey;
2141 		dsp->ds_add_code = cdb_sensedata->AdditionalSenseCode;
2142 		dsp->ds_qual_code =
2143 		    cdb_sensedata->AdditionalSenseCodeQualifier;
2144 		dsp->ds_addl_sense_length =
2145 		    sizeof (struct scsi_information_sense_descr);
2146 
2147 		isd = (struct scsi_information_sense_descr *)(dsp+1);
2148 		isd->isd_descr_type = DESCR_INFORMATION;
2149 		isd->isd_valid = 1;
2150 		isd->isd_information[0] = (err_blkno >> 56) & 0xFF;
2151 		isd->isd_information[1] = (err_blkno >> 48) & 0xFF;
2152 		isd->isd_information[2] = (err_blkno >> 40) & 0xFF;
2153 		isd->isd_information[3] = (err_blkno >> 32) & 0xFF;
2154 		isd->isd_information[4] = (err_blkno >> 24) & 0xFF;
2155 		isd->isd_information[5] = (err_blkno >> 16) & 0xFF;
2156 		isd->isd_information[6] = (err_blkno >> 8) & 0xFF;
2157 		isd->isd_information[7] = (err_blkno) & 0xFF;
2158 	}
2159 }
2160 }
2161
2162
2163 static int
2164 arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt)
2165 {
2166 struct CCB *ccb;
2167 uint32_t intmask_org = 0;
2168 int i = 0;
2169
2170 acb->num_aborts++;
2171
2172 if (abortpkt != NULL) {
2173 /*
2174 * We don't support abort of a single packet. All
2175 * callers in our kernel always do a global abort, so
2176 * there is no point in having code to support it
2177 * here.
2178 */
2179 return (DDI_FAILURE);
2180 }
2181
2182 /*
2183 * if abortpkt is NULL, the upper layer needs us
2184 * to abort all commands
2185 */
2186 if (acb->ccboutstandingcount != 0) {
2187 /* disable all outbound interrupt */
2188 intmask_org = arcmsr_disable_allintr(acb);
2189 /* clear and abort all outbound posted Q */
2190 arcmsr_done4abort_postqueue(acb);
2191 		/* tell the IOP 331 that its outstanding commands were aborted */
2192 (void) arcmsr_abort_host_command(acb);
2193
2194 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2195 ccb = acb->pccb_pool[i];
2196 if (ccb->ccb_state == ARCMSR_CCB_START) {
2197 /*
2198 * this ccb will complete at
2199 * hwinterrupt
2200 */
2201 /* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
2202 ccb->pkt->pkt_reason = CMD_ABORTED;
2203 ccb->pkt->pkt_statistics |= STAT_ABORTED;
2204 arcmsr_ccb_complete(ccb, 1);
2205 }
2206 }
2207 /*
2208 * enable outbound Post Queue, outbound
2209 * doorbell Interrupt
2210 */
2211 arcmsr_enable_allintr(acb, intmask_org);
2212 }
2213 return (DDI_SUCCESS);
2214 }
2215
2216
2217 /*
2218 * Autoconfiguration support
2219 */
2220 static int
2221 arcmsr_parse_devname(char *devnm, int *tgt, int *lun)
2222 {
2223 char devbuf[SCSI_MAXNAMELEN];
2224 char *addr;
2225 char *p, *tp, *lp;
2226 long num;
2227
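	/*
	 * devnm is expected to look like "name@tgt,lun[:minor]" with
	 * tgt and lun in hex; e.g. "sd@1,0:a" parses to target 1,
	 * lun 0 (the example name is illustrative only).
	 */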
2228 /* Parse dev name and address */
2229 (void) strlcpy(devbuf, devnm, sizeof (devbuf));
2230 addr = "";
2231 for (p = devbuf; *p != '\0'; p++) {
2232 if (*p == '@') {
2233 addr = p + 1;
2234 *p = '\0';
2235 } else if (*p == ':') {
2236 *p = '\0';
2237 break;
2238 }
2239 }
2240
2241 /* Parse target and lun */
2242 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
2243 if (*p == ',') {
2244 lp = p + 1;
2245 *p = '\0';
2246 break;
2247 }
2248 }
2249 if ((tgt != NULL) && (tp != NULL)) {
2250 if (ddi_strtol(tp, NULL, 0x10, &num) != 0)
2251 return (-1);
2252 *tgt = (int)num;
2253 }
2254 if ((lun != NULL) && (lp != NULL)) {
2255 if (ddi_strtol(lp, NULL, 0x10, &num) != 0)
2256 return (-1);
2257 *lun = (int)num;
2258 }
2259 return (0);
2260 }
2261
2262 static int
2263 arcmsr_name_node(dev_info_t *dip, char *name, int len)
2264 {
2265 int tgt, lun;
2266
2267 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "target",
2268 -1);
2269 if (tgt == -1)
2270 return (DDI_FAILURE);
2271 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "lun",
2272 -1);
2273 if (lun == -1)
2274 return (DDI_FAILURE);
2275 (void) snprintf(name, len, "%x,%x", tgt, lun);
2276 return (DDI_SUCCESS);
2277 }
2278
2279 static dev_info_t *
2280 arcmsr_find_child(struct ACB *acb, uint16_t tgt, uint8_t lun)
2281 {
2282 dev_info_t *child = NULL;
2283 char addr[SCSI_MAXNAMELEN];
2284 char tmp[SCSI_MAXNAMELEN];
2285
2286 	(void) snprintf(addr, sizeof (addr), "%x,%x", tgt, lun);
2287
2288 for (child = ddi_get_child(acb->dev_info);
2289 child;
2290 child = ddi_get_next_sibling(child)) {
2291 		/* We don't care about non-persistent nodes */
2292 if (ndi_dev_is_persistent_node(child) == 0)
2293 continue;
2294 if (arcmsr_name_node(child, tmp, SCSI_MAXNAMELEN) !=
2295 DDI_SUCCESS)
2296 continue;
2297 if (strcmp(addr, tmp) == 0)
2298 break;
2299 }
2300 return (child);
2301 }
2302
2303 static int
2304 arcmsr_config_child(struct ACB *acb, struct scsi_device *sd, dev_info_t **dipp)
2305 {
2306 char *nodename = NULL;
2307 char **compatible = NULL;
2308 int ncompatible = 0;
2309 dev_info_t *ldip = NULL;
2310 int tgt = sd->sd_address.a_target;
2311 int lun = sd->sd_address.a_lun;
2312 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
2313 int rval;
2314
2315 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
2316 NULL, &nodename, &compatible, &ncompatible);
2317 if (nodename == NULL) {
2318 		arcmsr_warn(acb, "found no compatible driver for T%dL%d",
2319 tgt, lun);
2320 rval = NDI_FAILURE;
2321 goto finish;
2322 }
2323 /* Create dev node */
2324 rval = ndi_devi_alloc(acb->dev_info, nodename, DEVI_SID_NODEID, &ldip);
2325 if (rval == NDI_SUCCESS) {
2326 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
2327 DDI_PROP_SUCCESS) {
2328 arcmsr_warn(acb,
2329 "unable to create target property for T%dL%d",
2330 tgt, lun);
2331 rval = NDI_FAILURE;
2332 goto finish;
2333 }
2334 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
2335 DDI_PROP_SUCCESS) {
2336 arcmsr_warn(acb,
2337 "unable to create lun property for T%dL%d",
2338 tgt, lun);
2339 rval = NDI_FAILURE;
2340 goto finish;
2341 }
2342 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
2343 "compatible", compatible, ncompatible) !=
2344 DDI_PROP_SUCCESS) {
2345 arcmsr_warn(acb,
2346 "unable to create compatible property for T%dL%d",
2347 tgt, lun);
2348 rval = NDI_FAILURE;
2349 goto finish;
2350 }
2351 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
2352 if (rval != NDI_SUCCESS) {
2353 arcmsr_warn(acb, "unable to online T%dL%d", tgt, lun);
2354 ndi_prop_remove_all(ldip);
2355 (void) ndi_devi_free(ldip);
2356 } else {
2357 arcmsr_log(acb, CE_NOTE, "T%dL%d onlined", tgt, lun);
2358 }
2359 }
2360 finish:
2361 if (dipp)
2362 *dipp = ldip;
2363
2364 scsi_hba_nodename_compatible_free(nodename, compatible);
2365 return (rval);
2366 }
2367
2368 static int
2369 arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun, dev_info_t **ldip)
2370 {
2371 struct scsi_device sd;
2372 dev_info_t *child;
2373 int rval;
2374
2375 if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
2376 if (ldip) {
2377 *ldip = child;
2378 }
2379 return (NDI_SUCCESS);
2380 }
2381 bzero(&sd, sizeof (struct scsi_device));
2382 sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
2383 sd.sd_address.a_target = tgt;
2384 sd.sd_address.a_lun = lun;
2385
2386 rval = scsi_hba_probe(&sd, NULL);
2387 if (rval == SCSIPROBE_EXISTS)
2388 rval = arcmsr_config_child(acb, &sd, ldip);
2389 scsi_unprobe(&sd);
2390 return (rval);
2391 }
2392
2393
2394 static int
2395 arcmsr_add_intr(struct ACB *acb, int intr_type)
2396 {
2397 int rc, count;
2398 dev_info_t *dev_info;
2399 const char *type_str;
2400
2401 switch (intr_type) {
2402 case DDI_INTR_TYPE_MSI:
2403 type_str = "MSI";
2404 break;
2405 case DDI_INTR_TYPE_MSIX:
2406 type_str = "MSIX";
2407 break;
2408 case DDI_INTR_TYPE_FIXED:
2409 type_str = "FIXED";
2410 break;
2411 default:
2412 type_str = "unknown";
2413 break;
2414 }
2415
2416 dev_info = acb->dev_info;
2417 /* Determine number of supported interrupts */
2418 rc = ddi_intr_get_nintrs(dev_info, intr_type, &count);
2419 if ((rc != DDI_SUCCESS) || (count == 0)) {
2420 arcmsr_warn(acb,
2421 "no interrupts of type %s, rc=0x%x, count=%d",
2422 type_str, rc, count);
2423 return (DDI_FAILURE);
2424 }
2425 acb->intr_size = sizeof (ddi_intr_handle_t) * count;
2426 acb->phandle = kmem_zalloc(acb->intr_size, KM_SLEEP);
2427 rc = ddi_intr_alloc(dev_info, acb->phandle, intr_type, 0,
2428 count, &acb->intr_count, DDI_INTR_ALLOC_NORMAL);
2429 if ((rc != DDI_SUCCESS) || (acb->intr_count == 0)) {
2430 arcmsr_warn(acb, "ddi_intr_alloc(%s) failed 0x%x",
2431 type_str, rc);
2432 return (DDI_FAILURE);
2433 }
2434 if (acb->intr_count < count) {
2435 arcmsr_log(acb, CE_NOTE, "Got %d interrupts, but requested %d",
2436 acb->intr_count, count);
2437 }
2438 /*
2439 * Get priority for first msi, assume remaining are all the same
2440 */
2441 if (ddi_intr_get_pri(acb->phandle[0], &acb->intr_pri) != DDI_SUCCESS) {
2442 arcmsr_warn(acb, "ddi_intr_get_pri failed");
2443 return (DDI_FAILURE);
2444 }
2445 if (acb->intr_pri >= ddi_intr_get_hilevel_pri()) {
2446 arcmsr_warn(acb, "high level interrupt not supported");
2447 return (DDI_FAILURE);
2448 }
2449
2450 for (int x = 0; x < acb->intr_count; x++) {
2451 if (ddi_intr_add_handler(acb->phandle[x], arcmsr_intr_handler,
2452 (caddr_t)acb, NULL) != DDI_SUCCESS) {
2453 arcmsr_warn(acb, "ddi_intr_add_handler(%s) failed",
2454 type_str);
2455 return (DDI_FAILURE);
2456 }
2457 }
2458 (void) ddi_intr_get_cap(acb->phandle[0], &acb->intr_cap);
2459 if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
2460 /* Call ddi_intr_block_enable() for MSI */
2461 (void) ddi_intr_block_enable(acb->phandle, acb->intr_count);
2462 } else {
2463 		/* otherwise enable each vector individually */
2464 for (int x = 0; x < acb->intr_count; x++) {
2465 (void) ddi_intr_enable(acb->phandle[x]);
2466 }
2467 }
2468 return (DDI_SUCCESS);
2469 }
2470
2471 static void
2472 arcmsr_remove_intr(struct ACB *acb)
2473 {
2474 int x;
2475
2476 if (acb->phandle == NULL)
2477 return;
2478
2479 /* Disable all interrupts */
2480 if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
2481 /* Call ddi_intr_block_disable() */
2482 (void) ddi_intr_block_disable(acb->phandle, acb->intr_count);
2483 } else {
2484 for (x = 0; x < acb->intr_count; x++) {
2485 (void) ddi_intr_disable(acb->phandle[x]);
2486 }
2487 }
2488 /* Call ddi_intr_remove_handler() */
2489 for (x = 0; x < acb->intr_count; x++) {
2490 (void) ddi_intr_remove_handler(acb->phandle[x]);
2491 (void) ddi_intr_free(acb->phandle[x]);
2492 }
2493 kmem_free(acb->phandle, acb->intr_size);
2494 acb->phandle = NULL;
2495 }
2496
2497 static void
2498 arcmsr_mutex_init(struct ACB *acb)
2499 {
2500 mutex_init(&acb->isr_mutex, NULL, MUTEX_DRIVER, NULL);
2501 mutex_init(&acb->acb_mutex, NULL, MUTEX_DRIVER, NULL);
2502 mutex_init(&acb->postq_mutex, NULL, MUTEX_DRIVER, NULL);
2503 mutex_init(&acb->workingQ_mutex, NULL, MUTEX_DRIVER, NULL);
2504 mutex_init(&acb->ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
2505 }
2506
2507 static void
2508 arcmsr_mutex_destroy(struct ACB *acb)
2509 {
2510 mutex_destroy(&acb->isr_mutex);
2511 mutex_destroy(&acb->acb_mutex);
2512 mutex_destroy(&acb->postq_mutex);
2513 mutex_destroy(&acb->workingQ_mutex);
2514 mutex_destroy(&acb->ioctl_mutex);
2515 }
2516
2517 static int
2518 arcmsr_initialize(struct ACB *acb)
2519 {
2520 struct CCB *pccb_tmp;
2521 size_t allocated_length;
2522 uint16_t wval;
2523 uint_t intmask_org, count;
2524 caddr_t arcmsr_ccbs_area;
2525 uint32_t wlval, cdb_phyaddr, offset, realccb_size;
2526 int32_t dma_sync_size;
2527 	int i, id, lun;
2528
2530 wlval = pci_config_get32(acb->pci_acc_handle, 0);
2531 wval = (uint16_t)((wlval >> 16) & 0xffff);
2532 realccb_size = P2ROUNDUP(sizeof (struct CCB), 32);
2533 switch (wval) {
2534 case PCI_DEVICE_ID_ARECA_1880:
2535 case PCI_DEVICE_ID_ARECA_1882:
2536 {
2537 uint32_t *iop_mu_regs_map0;
2538
2539 acb->adapter_type = ACB_ADAPTER_TYPE_C; /* lsi */
2540 dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2541 if (ddi_regs_map_setup(acb->dev_info, 2,
2542 (caddr_t *)&iop_mu_regs_map0, 0,
2543 sizeof (struct HBC_msgUnit), &acb->dev_acc_attr,
2544 &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2545 arcmsr_warn(acb, "unable to map registers");
2546 return (DDI_FAILURE);
2547 }
2548
2549 		if (ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2550 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle) !=
2551 DDI_SUCCESS) {
2552 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2553 arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2554 return (DDI_FAILURE);
2555 }
2556
2557 if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2558 &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2559 DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2560 &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2561 arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2562 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2563 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2564 return (DDI_FAILURE);
2565 }
2566
2567 if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2568 (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2569 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2570 &count) != DDI_DMA_MAPPED) {
2571 arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2572 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2573 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2574 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2575 return (DDI_FAILURE);
2576 }
2577 bzero(arcmsr_ccbs_area, dma_sync_size);
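		/*
		 * Align the start of the CCB array on a 32-byte boundary
		 * within the allocation; "offset" is the slack consumed,
		 * and it is added back to the cookie address later when
		 * cdb_phyaddr is computed.
		 */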
2578 offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2579 - PtrToNum(arcmsr_ccbs_area));
2580 arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2581 /* ioport base */
2582 acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2583 break;
2584 }
2585
2586 case PCI_DEVICE_ID_ARECA_1201:
2587 {
2588 uint32_t *iop_mu_regs_map0;
2589 uint32_t *iop_mu_regs_map1;
2590 struct HBB_msgUnit *phbbmu;
2591
2592 acb->adapter_type = ACB_ADAPTER_TYPE_B; /* marvell */
2593 dma_sync_size =
2594 (ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20) +
2595 sizeof (struct HBB_msgUnit);
2596 /* Allocate memory for the ccb */
2597 		if (ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2598 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle) !=
2599 DDI_SUCCESS) {
2600 arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2601 return (DDI_FAILURE);
2602 }
2603
2604 if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2605 &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2606 DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2607 &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2608 arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2609 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2610 return (DDI_FAILURE);
2611 }
2612
2613 if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2614 (caddr_t)arcmsr_ccbs_area, dma_sync_size,
2615 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2616 NULL, &acb->ccb_cookie, &count) != DDI_DMA_MAPPED) {
2617 arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2618 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2619 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2620 return (DDI_FAILURE);
2621 }
2622 bzero(arcmsr_ccbs_area, dma_sync_size);
2623 offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2624 - PtrToNum(arcmsr_ccbs_area));
2625 arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2626 acb->pmu = (struct msgUnit *)
2627 NumToPtr(PtrToNum(arcmsr_ccbs_area) +
2628 (realccb_size*ARCMSR_MAX_FREECCB_NUM));
2629 phbbmu = (struct HBB_msgUnit *)acb->pmu;
2630
2631 /* setup device register */
2632 if (ddi_regs_map_setup(acb->dev_info, 1,
2633 (caddr_t *)&iop_mu_regs_map0, 0,
2634 sizeof (struct HBB_DOORBELL), &acb->dev_acc_attr,
2635 &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2636 arcmsr_warn(acb, "unable to map base0 registers");
2637 (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2638 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2639 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2640 return (DDI_FAILURE);
2641 }
2642
2643 /* ARCMSR_DRV2IOP_DOORBELL */
2644 phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)iop_mu_regs_map0;
2645 if (ddi_regs_map_setup(acb->dev_info, 2,
2646 (caddr_t *)&iop_mu_regs_map1, 0,
2647 sizeof (struct HBB_RWBUFFER), &acb->dev_acc_attr,
2648 &acb->reg_mu_acc_handle1) != DDI_SUCCESS) {
2649 arcmsr_warn(acb, "unable to map base1 registers");
2650 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2651 (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2652 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2653 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2654 return (DDI_FAILURE);
2655 }
2656
2657 /* ARCMSR_MSGCODE_RWBUFFER */
2658 phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)iop_mu_regs_map1;
2659 break;
2660 }
2661
2662 case PCI_DEVICE_ID_ARECA_1110:
2663 case PCI_DEVICE_ID_ARECA_1120:
2664 case PCI_DEVICE_ID_ARECA_1130:
2665 case PCI_DEVICE_ID_ARECA_1160:
2666 case PCI_DEVICE_ID_ARECA_1170:
2667 case PCI_DEVICE_ID_ARECA_1210:
2668 case PCI_DEVICE_ID_ARECA_1220:
2669 case PCI_DEVICE_ID_ARECA_1230:
2670 case PCI_DEVICE_ID_ARECA_1231:
2671 case PCI_DEVICE_ID_ARECA_1260:
2672 case PCI_DEVICE_ID_ARECA_1261:
2673 case PCI_DEVICE_ID_ARECA_1270:
2674 case PCI_DEVICE_ID_ARECA_1280:
2675 case PCI_DEVICE_ID_ARECA_1212:
2676 case PCI_DEVICE_ID_ARECA_1222:
2677 case PCI_DEVICE_ID_ARECA_1380:
2678 case PCI_DEVICE_ID_ARECA_1381:
2679 case PCI_DEVICE_ID_ARECA_1680:
2680 case PCI_DEVICE_ID_ARECA_1681:
2681 {
2682 uint32_t *iop_mu_regs_map0;
2683
2684 acb->adapter_type = ACB_ADAPTER_TYPE_A; /* intel */
2685 dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2686 if (ddi_regs_map_setup(acb->dev_info, 1,
2687 (caddr_t *)&iop_mu_regs_map0, 0,
2688 sizeof (struct HBA_msgUnit), &acb->dev_acc_attr,
2689 &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2690 arcmsr_warn(acb, "unable to map registers");
2691 return (DDI_FAILURE);
2692 }
2693
2694 		if (ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2695 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle) !=
2696 DDI_SUCCESS) {
2697 arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2698 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2699 return (DDI_FAILURE);
2700 }
2701
2702 if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2703 &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2704 DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2705 &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2706 			arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2707 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2708 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2709 return (DDI_FAILURE);
2710 }
2711
2712 if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2713 (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2714 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2715 &count) != DDI_DMA_MAPPED) {
2716 arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2717 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2718 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2719 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2720 return (DDI_FAILURE);
2721 }
2722 bzero(arcmsr_ccbs_area, dma_sync_size);
2723 offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2724 - PtrToNum(arcmsr_ccbs_area));
2725 arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2726 /* ioport base */
2727 acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2728 break;
2729 }
2730
2731 default:
2732 arcmsr_warn(acb, "Unknown RAID adapter type!");
2733 return (DDI_FAILURE);
2734 }
2735 arcmsr_init_list_head(&acb->ccb_complete_list);
	/* we can no longer access PCI configuration space after this point */
2737 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2738 ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ);
2739 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
2740 /* physical address of acb->pccb_pool */
2741 cdb_phyaddr = acb->ccb_cookie.dmac_address + offset;
2742
2743 pccb_tmp = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
2744
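	/*
	 * Type A and B adapters post the CCB address in 32-byte units
	 * (cdb_phyaddr >> 5); the pool is 32-byte aligned, so nothing
	 * is lost, and the low bits of the posted word apparently stay
	 * free for flags such as ARCMSR_CCBPOST_FLAG_SGL_BSIZE.  Type C
	 * adapters post the full physical address.
	 */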
2745 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2746 pccb_tmp->cdb_phyaddr_pattern =
2747 (acb->adapter_type == ACB_ADAPTER_TYPE_C) ?
2748 cdb_phyaddr : (cdb_phyaddr >> 5);
2749 pccb_tmp->acb = acb;
2750 acb->ccbworkingQ[i] = acb->pccb_pool[i] = pccb_tmp;
2751 cdb_phyaddr = cdb_phyaddr + realccb_size;
2752 pccb_tmp = (struct CCB *)NumToPtr(PtrToNum(pccb_tmp) +
2753 realccb_size);
2754 }
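	/*
	 * pccb_tmp and cdb_phyaddr advanced in lockstep through the
	 * loop above, so their difference is the constant
	 * virtual-to-physical offset for the whole CCB pool.
	 */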
2755 acb->vir2phy_offset = PtrToNum(pccb_tmp) - cdb_phyaddr;
2756
2757 /* disable all outbound interrupt */
2758 intmask_org = arcmsr_disable_allintr(acb);
2759
2760 if (!arcmsr_iop_confirm(acb)) {
2761 		arcmsr_warn(acb, "arcmsr_iop_confirm error");
2762 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2763 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2764 return (DDI_FAILURE);
2765 }
2766
2767 for (id = 0; id < ARCMSR_MAX_TARGETID; id++) {
2768 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
2769 acb->devstate[id][lun] = ARECA_RAID_GONE;
2770 }
2771 }
2772
2773 /* enable outbound Post Queue, outbound doorbell Interrupt */
2774 arcmsr_enable_allintr(acb, intmask_org);
2775
2776 	return (DDI_SUCCESS);
2777 }
2778
2779 static int
2780 arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance)
2781 {
2782 scsi_hba_tran_t *hba_trans;
2783 ddi_device_acc_attr_t dev_acc_attr;
2784 struct ACB *acb;
2785 uint16_t wval;
2786 int raid6 = 1;
2787 char *type;
2788 int intr_types;
2789
2790
2791 /*
2792 * Soft State Structure
2793 * The driver should allocate the per-device-instance
2794 * soft state structure, being careful to clean up properly if
2795 * an error occurs. Allocate data structure.
2796 */
2797 if (ddi_soft_state_zalloc(arcmsr_soft_state, instance) != DDI_SUCCESS) {
2798 arcmsr_warn(NULL, "ddi_soft_state_zalloc failed");
2799 return (DDI_FAILURE);
2800 }
2801
2802 acb = ddi_get_soft_state(arcmsr_soft_state, instance);
2803 ASSERT(acb);
2804
2805 arcmsr_mutex_init(acb);
2806
2807 /* acb is already zalloc()d so we don't need to bzero() it */
2808 dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2809 dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2810 dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
2811
2812 acb->dev_info = dev_info;
2813 acb->dev_acc_attr = dev_acc_attr;
2814
2815 /*
2816 * The driver, if providing DMA, should also check that its hardware is
2817 * installed in a DMA-capable slot
2818 */
2819 if (ddi_slaveonly(dev_info) == DDI_SUCCESS) {
2820 arcmsr_warn(acb, "hardware is not installed in"
2821 " a DMA-capable slot");
2822 goto error_level_0;
2823 }
2824 if (pci_config_setup(dev_info, &acb->pci_acc_handle) != DDI_SUCCESS) {
2825 arcmsr_warn(acb, "pci_config_setup() failed, attach failed");
2826 goto error_level_0;
2827 }
2828
2829 wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_VENID);
2830 if (wval != PCI_VENDOR_ID_ARECA) {
2831 arcmsr_warn(acb,
2832 		    "vendorid (0x%04x) does not match 0x%04x "
2833 "(PCI_VENDOR_ID_ARECA)",
2834 wval, PCI_VENDOR_ID_ARECA);
2835 goto error_level_0;
2836 }
2837
2838 wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID);
2839 switch (wval) {
2840 case PCI_DEVICE_ID_ARECA_1110:
2841 case PCI_DEVICE_ID_ARECA_1210:
2842 case PCI_DEVICE_ID_ARECA_1201:
2843 raid6 = 0;
2844 /*FALLTHRU*/
2845 case PCI_DEVICE_ID_ARECA_1120:
2846 case PCI_DEVICE_ID_ARECA_1130:
2847 case PCI_DEVICE_ID_ARECA_1160:
2848 case PCI_DEVICE_ID_ARECA_1170:
2849 case PCI_DEVICE_ID_ARECA_1220:
2850 case PCI_DEVICE_ID_ARECA_1230:
2851 case PCI_DEVICE_ID_ARECA_1260:
2852 case PCI_DEVICE_ID_ARECA_1270:
2853 case PCI_DEVICE_ID_ARECA_1280:
2854 type = "SATA 3G";
2855 break;
2856 case PCI_DEVICE_ID_ARECA_1380:
2857 case PCI_DEVICE_ID_ARECA_1381:
2858 case PCI_DEVICE_ID_ARECA_1680:
2859 case PCI_DEVICE_ID_ARECA_1681:
2860 type = "SAS 3G";
2861 break;
2862 case PCI_DEVICE_ID_ARECA_1880:
2863 type = "SAS 6G";
2864 break;
2865 default:
2866 type = "X-TYPE";
2867 arcmsr_warn(acb, "Unknown Host Adapter RAID Controller!");
2868 goto error_level_0;
2869 }
2870
2871 arcmsr_log(acb, CE_CONT, "Areca %s Host Adapter RAID Controller%s\n",
2872 type, raid6 ? " (RAID6 capable)" : "");
2873
2874 /* we disable iop interrupt here */
2875 if (arcmsr_initialize(acb) == DDI_FAILURE) {
2876 arcmsr_warn(acb, "arcmsr_initialize failed");
2877 goto error_level_1;
2878 }
2879
2880 /* Allocate a transport structure */
2881 hba_trans = scsi_hba_tran_alloc(dev_info, SCSI_HBA_CANSLEEP);
2882 if (hba_trans == NULL) {
2883 arcmsr_warn(acb, "scsi_hba_tran_alloc failed");
2884 goto error_level_2;
2885 }
2886 acb->scsi_hba_transport = hba_trans;
2887 acb->dev_info = dev_info;
2888 /* init scsi host adapter transport entry */
2889 hba_trans->tran_hba_private = acb;
2890 hba_trans->tran_tgt_private = NULL;
2891 /*
2892 * If no per-target initialization is required, the HBA can leave
2893 * tran_tgt_init set to NULL.
2894 */
2895 hba_trans->tran_tgt_init = arcmsr_tran_tgt_init;
2896 hba_trans->tran_tgt_probe = scsi_hba_probe;
2897 hba_trans->tran_tgt_free = NULL;
2898 hba_trans->tran_start = arcmsr_tran_start;
2899 hba_trans->tran_abort = arcmsr_tran_abort;
2900 hba_trans->tran_reset = arcmsr_tran_reset;
2901 hba_trans->tran_getcap = arcmsr_tran_getcap;
2902 hba_trans->tran_setcap = arcmsr_tran_setcap;
2903 hba_trans->tran_init_pkt = arcmsr_tran_init_pkt;
2904 hba_trans->tran_destroy_pkt = arcmsr_tran_destroy_pkt;
2905 hba_trans->tran_dmafree = arcmsr_tran_dmafree;
2906 hba_trans->tran_sync_pkt = arcmsr_tran_sync_pkt;
2907
2908 hba_trans->tran_reset_notify = NULL;
2909 hba_trans->tran_get_bus_addr = NULL;
2910 hba_trans->tran_get_name = NULL;
2911 hba_trans->tran_quiesce = NULL;
2912 hba_trans->tran_unquiesce = NULL;
2913 hba_trans->tran_bus_reset = NULL;
2914 hba_trans->tran_bus_config = arcmsr_tran_bus_config;
2915 hba_trans->tran_add_eventcall = NULL;
2916 hba_trans->tran_get_eventcookie = NULL;
2917 hba_trans->tran_post_event = NULL;
2918 hba_trans->tran_remove_eventcall = NULL;
2919
2920 /* iop init and enable interrupt here */
2921 arcmsr_iop_init(acb);
2922
2923 /* Get supported interrupt types */
2924 if (ddi_intr_get_supported_types(dev_info, &intr_types) !=
2925 DDI_SUCCESS) {
2926 arcmsr_warn(acb, "ddi_intr_get_supported_types failed");
2927 goto error_level_3;
2928 }
2929 if (intr_types & DDI_INTR_TYPE_FIXED) {
2930 if (arcmsr_add_intr(acb, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS)
2931 goto error_level_5;
2932 	} else if (intr_types & DDI_INTR_TYPE_MSI) {
2933 		if (arcmsr_add_intr(acb, DDI_INTR_TYPE_MSI) != DDI_SUCCESS)
2934 goto error_level_5;
2935 }
2936
2937 /*
2938 * The driver should attach this instance of the device, and
2939 * perform error cleanup if necessary
2940 */
2941 if (scsi_hba_attach_setup(dev_info, &arcmsr_dma_attr,
2942 hba_trans, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
2943 arcmsr_warn(acb, "scsi_hba_attach_setup failed");
2944 goto error_level_5;
2945 }
2946
2947 /* Create a taskq for dealing with dr events */
2948 if ((acb->taskq = ddi_taskq_create(dev_info, "arcmsr_dr_taskq", 1,
2949 TASKQ_DEFAULTPRI, 0)) == NULL) {
2950 arcmsr_warn(acb, "ddi_taskq_create failed");
2951 goto error_level_8;
2952 }
2953
2954 acb->timeout_count = 0;
2955 /* active ccbs "timeout" watchdog */
2956 acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
2957 (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
2958 acb->timeout_sc_id = timeout(arcmsr_devMap_monitor, (caddr_t)acb,
2959 (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
2960
2961 /* report device info */
2962 ddi_report_dev(dev_info);
2963
2964 return (DDI_SUCCESS);
2965
2966 error_level_8:
2967
2968 error_level_7:
2969 error_level_6:
2970 (void) scsi_hba_detach(dev_info);
2971
2972 error_level_5:
2973 arcmsr_remove_intr(acb);
2974
2975 error_level_3:
2976 error_level_4:
2977 if (acb->scsi_hba_transport)
2978 scsi_hba_tran_free(acb->scsi_hba_transport);
2979
2980 error_level_2:
2981 if (acb->ccbs_acc_handle)
2982 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2983 if (acb->ccbs_pool_handle)
2984 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2985
2986 error_level_1:
2987 if (acb->pci_acc_handle)
2988 pci_config_teardown(&acb->pci_acc_handle);
2989 arcmsr_mutex_destroy(acb);
2990 ddi_soft_state_free(arcmsr_soft_state, instance);
2991
2992 error_level_0:
2993 return (DDI_FAILURE);
2994 }
2995
2996
2997 static void
2998 arcmsr_vlog(struct ACB *acb, int level, char *fmt, va_list ap)
2999 {
3000 char buf[256];
3001
3002 if (acb != NULL) {
3003 (void) snprintf(buf, sizeof (buf), "%s%d: %s",
3004 ddi_driver_name(acb->dev_info),
3005 ddi_get_instance(acb->dev_info), fmt);
3006 fmt = buf;
3007 }
3008 vcmn_err(level, fmt, ap);
3009 }
3010
3011 static void
3012 arcmsr_log(struct ACB *acb, int level, char *fmt, ...)
3013 {
3014 va_list ap;
3015
3016 va_start(ap, fmt);
3017 arcmsr_vlog(acb, level, fmt, ap);
3018 va_end(ap);
3019 }
3020
3021 static void
3022 arcmsr_warn(struct ACB *acb, char *fmt, ...)
3023 {
3024 va_list ap;
3025
3026 va_start(ap, fmt);
3027 arcmsr_vlog(acb, CE_WARN, fmt, ap);
3028 va_end(ap);
3029 }
3030
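/*
 * Minimal circular doubly-linked list primitives in the style of the
 * Linux list_head: an empty list points back at itself, so emptiness
 * is tested with head->next == head.
 */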
3031 static void
3032 arcmsr_init_list_head(struct list_head *list)
3033 {
3034 list->next = list;
3035 list->prev = list;
3036 }
3037
3038 static void
3039 arcmsr_x_list_del(struct list_head *prev, struct list_head *next)
3040 {
3041 next->prev = prev;
3042 prev->next = next;
3043 }
3044
3045 static void
3046 arcmsr_x_list_add(struct list_head *new_one, struct list_head *prev,
3047 struct list_head *next)
3048 {
3049 next->prev = new_one;
3050 new_one->next = next;
3051 new_one->prev = prev;
3052 prev->next = new_one;
3053 }
3054
3055 static void
3056 arcmsr_list_add_tail(kmutex_t *list_lock, struct list_head *new_one,
3057 struct list_head *head)
3058 {
3059 mutex_enter(list_lock);
3060 arcmsr_x_list_add(new_one, head->prev, head);
3061 mutex_exit(list_lock);
3062 }
3063
3064 static struct list_head *
3065 arcmsr_list_get_first(kmutex_t *list_lock, struct list_head *head)
3066 {
3067 struct list_head *one = NULL;
3068
3069 mutex_enter(list_lock);
3070 if (head->next == head) {
3071 mutex_exit(list_lock);
3072 return (NULL);
3073 }
3074 one = head->next;
3075 arcmsr_x_list_del(one->prev, one->next);
3076 arcmsr_init_list_head(one);
3077 mutex_exit(list_lock);
3078 return (one);
3079 }
3080
3081 static struct CCB *
3082 arcmsr_get_complete_ccb_from_list(struct ACB *acb)
3083 {
3084 struct list_head *first_complete_ccb_list = NULL;
3085 struct CCB *ccb;
3086
3087 first_complete_ccb_list =
3088 arcmsr_list_get_first(&acb->ccb_complete_list_mutex,
3089 &acb->ccb_complete_list);
3090 if (first_complete_ccb_list == NULL) {
3091 return (NULL);
3092 }
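	/*
	 * Recover the enclosing CCB from its embedded list node by
	 * subtracting the member offset (a container_of in all but
	 * name).
	 */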
3093 ccb = (void *)((caddr_t)(first_complete_ccb_list) -
3094 offsetof(struct CCB, complete_queue_pointer));
3095 return (ccb);
3096 }
3097
3098 static struct CCB *
3099 arcmsr_get_freeccb(struct ACB *acb)
3100 {
3101 struct CCB *ccb;
3102 int ccb_get_index, ccb_put_index;
3103
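	/*
	 * The free CCB pool is a ring indexed by ccb_get_index and
	 * ccb_put_index; the ring is empty when the advanced get index
	 * catches up with the put index, in which case NULL is
	 * returned.
	 */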
3104 mutex_enter(&acb->workingQ_mutex);
3105 ccb_put_index = acb->ccb_put_index;
3106 ccb_get_index = acb->ccb_get_index;
3107 ccb = acb->ccbworkingQ[ccb_get_index];
3108 ccb_get_index++;
3109 if (ccb_get_index >= ARCMSR_MAX_FREECCB_NUM)
3110 ccb_get_index = ccb_get_index - ARCMSR_MAX_FREECCB_NUM;
3111 if (ccb_put_index != ccb_get_index) {
3112 acb->ccb_get_index = ccb_get_index;
3113 arcmsr_init_list_head(&ccb->complete_queue_pointer);
3114 ccb->ccb_state = ARCMSR_CCB_UNBUILD;
3115 } else {
3116 ccb = NULL;
3117 }
3118 mutex_exit(&acb->workingQ_mutex);
3119 return (ccb);
3120 }
3121
3122
3123 static void
3124 arcmsr_free_ccb(struct CCB *ccb)
3125 {
3126 struct ACB *acb = ccb->acb;
3127
3128 if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3129 return;
3130 }
3131 mutex_enter(&acb->workingQ_mutex);
3132 ccb->ccb_state = ARCMSR_CCB_FREE;
3133 ccb->pkt = NULL;
3134 ccb->pkt_dma_handle = NULL;
3135 ccb->ccb_flags = 0;
3136 acb->ccbworkingQ[acb->ccb_put_index] = ccb;
3137 acb->ccb_put_index++;
3138 if (acb->ccb_put_index >= ARCMSR_MAX_FREECCB_NUM)
3139 acb->ccb_put_index =
3140 acb->ccb_put_index - ARCMSR_MAX_FREECCB_NUM;
3141 mutex_exit(&acb->workingQ_mutex);
3142 }
3143
3144
3145 static void
3146 arcmsr_ccbs_timeout(void *arg)
3147 {
3148 struct ACB *acb = (struct ACB *)arg;
3149 struct CCB *ccb;
3150 	int i, timeout_count = 0;
3151 uint32_t intmask_org;
3152 time_t current_time = ddi_get_time();
3153
3154 intmask_org = arcmsr_disable_allintr(acb);
3155 mutex_enter(&acb->isr_mutex);
3156 if (acb->ccboutstandingcount != 0) {
3157 /* check each ccb */
3158 i = ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
3159 DDI_DMA_SYNC_FORKERNEL);
3160 if (i != DDI_SUCCESS) {
3161 if ((acb->timeout_id != 0) &&
3162 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3163 				/* re-check packet timeouts every 60 seconds */
3164 acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3165 (void*)acb, (ARCMSR_TIMEOUT_WATCH *
3166 drv_usectohz(1000000)));
3167 }
3168 mutex_exit(&acb->isr_mutex);
3169 arcmsr_enable_allintr(acb, intmask_org);
3170 return;
3171 }
3173 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3174 ccb = acb->pccb_pool[i];
3175 if (ccb->acb != acb) {
3176 break;
3177 }
3178 if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3179 continue;
3180 }
3181 if (ccb->pkt == NULL) {
3182 continue;
3183 }
3184 if (ccb->pkt->pkt_time == 0) {
3185 continue;
3186 }
3187 if (ccb->ccb_time >= current_time) {
3188 continue;
3189 }
3190 int id = ccb->pkt->pkt_address.a_target;
3191 int lun = ccb->pkt->pkt_address.a_lun;
3192 if (ccb->ccb_state == ARCMSR_CCB_START) {
3193 uint8_t *cdb = (uint8_t *)&ccb->arcmsr_cdb.Cdb;
3194
3195 timeout_count++;
3196 				arcmsr_warn(acb,
3197 				    "scsi target %d lun %d cmd=0x%x "
3198 				    "command timeout, ccb=0x%p",
3199 				    id, lun, *cdb, (void *)ccb);
3200 ccb->ccb_state = ARCMSR_CCB_TIMEOUT;
3201 ccb->pkt->pkt_reason = CMD_TIMEOUT;
3202 				ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
3203 /* acb->devstate[id][lun] = ARECA_RAID_GONE; */
3204 arcmsr_ccb_complete(ccb, 1);
3205 continue;
3206 } else if ((ccb->ccb_state & ARCMSR_CCB_CAN_BE_FREE) ==
3207 ARCMSR_CCB_CAN_BE_FREE) {
3208 arcmsr_free_ccb(ccb);
3209 }
3210 }
3211 }
3212 if ((acb->timeout_id != 0) &&
3213 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3214 		/* re-check packet timeouts every 60 seconds */
3215 acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3216 (void*)acb, (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
3217 }
3218 mutex_exit(&acb->isr_mutex);
3219 arcmsr_enable_allintr(acb, intmask_org);
3220 }
3221
3222 static void
3223 arcmsr_abort_dr_ccbs(struct ACB *acb, uint16_t target, uint8_t lun)
3224 {
3225 struct CCB *ccb;
3226 uint32_t intmask_org;
3227 int i;
3228
3229 /* disable all outbound interrupts */
3230 intmask_org = arcmsr_disable_allintr(acb);
3231 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3232 ccb = acb->pccb_pool[i];
3233 if (ccb->ccb_state == ARCMSR_CCB_START) {
3234 if ((target == ccb->pkt->pkt_address.a_target) &&
3235 (lun == ccb->pkt->pkt_address.a_lun)) {
3236 ccb->ccb_state = ARCMSR_CCB_ABORTED;
3237 ccb->pkt->pkt_reason = CMD_ABORTED;
3238 ccb->pkt->pkt_statistics |= STAT_ABORTED;
3239 arcmsr_ccb_complete(ccb, 1);
3240 arcmsr_log(acb, CE_NOTE,
3241 "abort T%dL%d ccb", target, lun);
3242 }
3243 }
3244 }
3245 /* enable outbound Post Queue, outbound doorbell Interrupt */
3246 arcmsr_enable_allintr(acb, intmask_org);
3247 }
3248
3249 static int
3250 arcmsr_scsi_device_probe(struct ACB *acb, uint16_t tgt, uint8_t lun)
3251 {
3252 struct scsi_device sd;
3253 dev_info_t *child;
3254 int rval;
3255
3256 bzero(&sd, sizeof (struct scsi_device));
3257 sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
3258 sd.sd_address.a_target = (uint16_t)tgt;
3259 sd.sd_address.a_lun = (uint8_t)lun;
3260 if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
3261 rval = scsi_hba_probe(&sd, NULL);
3262 if (rval == SCSIPROBE_EXISTS) {
3263 rval = ndi_devi_online(child, NDI_ONLINE_ATTACH);
3264 if (rval != NDI_SUCCESS) {
3265 arcmsr_warn(acb, "unable to online T%dL%d",
3266 tgt, lun);
3267 } else {
3268 arcmsr_log(acb, CE_NOTE, "T%dL%d onlined",
3269 tgt, lun);
3270 }
3271 }
3272 } else {
3273 rval = scsi_hba_probe(&sd, NULL);
3274 if (rval == SCSIPROBE_EXISTS)
3275 rval = arcmsr_config_child(acb, &sd, NULL);
3276 }
3277 scsi_unprobe(&sd);
3278 return (rval);
3279 }
3280
3281 static void
3282 arcmsr_dr_handle(struct ACB *acb)
3283 {
3284 char *acb_dev_map = (char *)acb->device_map;
3285 char *devicemap;
3286 char temp;
3287 uint16_t target;
3288 uint8_t lun;
3289 char diff;
3290 int circ = 0;
3291 dev_info_t *dip;
3292 ddi_acc_handle_t reg;
3293
3294 switch (acb->adapter_type) {
3295 case ACB_ADAPTER_TYPE_A:
3296 {
3297 struct HBA_msgUnit *phbamu;
3298
3299 phbamu = (struct HBA_msgUnit *)acb->pmu;
3300 devicemap = (char *)&phbamu->msgcode_rwbuffer[21];
3301 reg = acb->reg_mu_acc_handle0;
3302 break;
3303 }
3304
3305 case ACB_ADAPTER_TYPE_B:
3306 {
3307 struct HBB_msgUnit *phbbmu;
3308
3309 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3310 devicemap = (char *)
3311 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[21];
3312 reg = acb->reg_mu_acc_handle1;
3313 break;
3314 }
3315
3316 case ACB_ADAPTER_TYPE_C:
3317 {
3318 struct HBC_msgUnit *phbcmu;
3319
3320 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3321 devicemap = (char *)&phbcmu->msgcode_rwbuffer[21];
3322 reg = acb->reg_mu_acc_handle0;
3323 break;
3324 }
3325
3326 }
3327
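	/*
	 * The firmware keeps one byte per target starting at
	 * msgcode_rwbuffer[21], each a bitmask of present LUNs.
	 * XORing with the cached copy yields the LUNs that changed:
	 * a changed bit that is now set means hot-add, a changed bit
	 * now clear means removal.
	 */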
3328 for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
3329 temp = CHIP_REG_READ8(reg, devicemap);
3330 		diff = (*acb_dev_map) ^ temp;
3331 if (diff != 0) {
3332 *acb_dev_map = temp;
3333 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
3334 if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
3335 ndi_devi_enter(acb->dev_info, &circ);
3336 acb->devstate[target][lun] =
3337 ARECA_RAID_GOOD;
3338 (void) arcmsr_scsi_device_probe(acb,
3339 target, lun);
3340 ndi_devi_exit(acb->dev_info, circ);
3341 arcmsr_log(acb, CE_NOTE,
3342 "T%dL%d on-line", target, lun);
3343 } else if ((temp & 0x01) == 0 &&
3344 (diff & 0x01) == 1) {
3345 dip = arcmsr_find_child(acb, target,
3346 lun);
3347 if (dip != NULL) {
3348 acb->devstate[target][lun] =
3349 ARECA_RAID_GONE;
3350 if (mutex_owned(&acb->
3351 isr_mutex)) {
3352 arcmsr_abort_dr_ccbs(
3353 acb, target, lun);
3354 (void)
3355 ndi_devi_offline(
3356 dip,
3357 NDI_DEVI_REMOVE |
3358 NDI_DEVI_OFFLINE);
3359 } else {
3360 mutex_enter(&acb->
3361 isr_mutex);
3362 arcmsr_abort_dr_ccbs(
3363 acb, target, lun);
3364 (void)
3365 ndi_devi_offline(
3366 dip,
3367 NDI_DEVI_REMOVE |
3368 NDI_DEVI_OFFLINE);
3369 mutex_exit(&acb->
3370 isr_mutex);
3371 }
3372 }
3373 arcmsr_log(acb, CE_NOTE,
3374 "T%dL%d off-line", target, lun);
3375 }
3376 temp >>= 1;
3377 diff >>= 1;
3378 }
3379 }
3380 devicemap++;
3381 acb_dev_map++;
3382 }
3383 }
3384
3385
3386 static void
3387 arcmsr_devMap_monitor(void *arg)
3388 {
3389 	struct ACB *acb = (struct ACB *)arg;
3390 
3391 	switch (acb->adapter_type) {
3392 case ACB_ADAPTER_TYPE_A:
3393 {
3394 struct HBA_msgUnit *phbamu;
3395
3396 phbamu = (struct HBA_msgUnit *)acb->pmu;
3397 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3398 &phbamu->inbound_msgaddr0,
3399 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3400 break;
3401 }
3402
3403 case ACB_ADAPTER_TYPE_B:
3404 {
3405 struct HBB_msgUnit *phbbmu;
3406
3407 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3408 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3409 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3410 ARCMSR_MESSAGE_GET_CONFIG);
3411 break;
3412 }
3413
3414 case ACB_ADAPTER_TYPE_C:
3415 {
3416 struct HBC_msgUnit *phbcmu;
3417
3418 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3419 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3420 &phbcmu->inbound_msgaddr0,
3421 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3422 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3423 &phbcmu->inbound_doorbell,
3424 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3425 break;
3426 }
3427
3428 }
3429
3430 	if ((acb->timeout_sc_id != 0) &&
3431 	    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3432 		/* rescan the device map every 5 seconds */
3433 		acb->timeout_sc_id = timeout(arcmsr_devMap_monitor,
3434 		    (void *)acb, (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
3435 }
3436 }
3437
3438
3439 static uint32_t
3440 arcmsr_disable_allintr(struct ACB *acb)
3441 {
3442 uint32_t intmask_org;
3443
3444 switch (acb->adapter_type) {
3445 case ACB_ADAPTER_TYPE_A:
3446 {
3447 struct HBA_msgUnit *phbamu;
3448
3449 phbamu = (struct HBA_msgUnit *)acb->pmu;
3450 /* disable all outbound interrupt */
3451 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3452 &phbamu->outbound_intmask);
3453 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3454 &phbamu->outbound_intmask,
3455 intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
3456 break;
3457 }
3458
3459 case ACB_ADAPTER_TYPE_B:
3460 {
3461 struct HBB_msgUnit *phbbmu;
3462
3463 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3464 /* disable all outbound interrupt */
3465 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3466 &phbbmu->hbb_doorbell->iop2drv_doorbell_mask);
3467 /* disable all interrupts */
3468 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3469 &phbbmu->hbb_doorbell->iop2drv_doorbell_mask, 0);
3470 break;
3471 }
3472
3473 case ACB_ADAPTER_TYPE_C:
3474 {
3475 struct HBC_msgUnit *phbcmu;
3476
3477 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3478 /* disable all outbound interrupt */
3479 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3480 &phbcmu->host_int_mask); /* disable outbound message0 int */
3481 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3482 &phbcmu->host_int_mask,
3483 intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
3484 break;
3485 }
3486
3487 }
3488 return (intmask_org);
3489 }
3490
3491
3492 static void
3493 arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org)
3494 {
3495 int mask;
3496
3497 switch (acb->adapter_type) {
3498 case ACB_ADAPTER_TYPE_A:
3499 {
3500 struct HBA_msgUnit *phbamu;
3501
3502 phbamu = (struct HBA_msgUnit *)acb->pmu;
3503 /*
3504 * enable outbound Post Queue, outbound doorbell message0
3505 * Interrupt
3506 */
3507 mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
3508 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
3509 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
3510 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3511 &phbamu->outbound_intmask, intmask_org & mask);
3512 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
3513 break;
3514 }
3515
3516 case ACB_ADAPTER_TYPE_B:
3517 {
3518 struct HBB_msgUnit *phbbmu;
3519
3520 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3521 mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK |
3522 ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE |
3523 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
3524 /* 1=interrupt enable, 0=interrupt disable */
3525 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3526 &phbbmu->hbb_doorbell->iop2drv_doorbell_mask,
3527 intmask_org | mask);
3528 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
3529 break;
3530 }
3531
3532 case ACB_ADAPTER_TYPE_C:
3533 {
3534 struct HBC_msgUnit *phbcmu;
3535
3536 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3537 /* enable outbound Post Queue,outbound doorbell Interrupt */
3538 mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK |
3539 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK |
3540 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
3541 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3542 &phbcmu->host_int_mask, intmask_org & mask);
3543 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
3544 break;
3545 }
3546
3547 }
3548 }
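/*
 * Editor's note -- usage sketch: the two routines above bracket any code
 * that must run with adapter interrupts quiesced.  The saved mask is
 * opaque to callers because its polarity differs by adapter type (on
 * type A/C a set bit masks an interrupt; on type B's doorbell mask a
 * set bit enables one), so it is simply captured and handed back.
 */
#if 0	/* sketch only; mirrors the pattern in arcmsr_iop_parking() below */
	uint32_t intmask_org;

	intmask_org = arcmsr_disable_allintr(acb);
	/* ... touch message-unit registers without the ISR racing us ... */
	arcmsr_enable_allintr(acb, intmask_org);
#endif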
3549
3550
3551 static void
3552 arcmsr_iop_parking(struct ACB *acb)
3553 {
3554 /* stop adapter background rebuild */
3555 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
3556 uint32_t intmask_org;
3557
3558 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
3559 /* disable all outbound interrupt */
3560 intmask_org = arcmsr_disable_allintr(acb);
3561 switch (acb->adapter_type) {
3562 case ACB_ADAPTER_TYPE_A:
3563 arcmsr_stop_hba_bgrb(acb);
3564 arcmsr_flush_hba_cache(acb);
3565 break;
3566
3567 case ACB_ADAPTER_TYPE_B:
3568 arcmsr_stop_hbb_bgrb(acb);
3569 arcmsr_flush_hbb_cache(acb);
3570 break;
3571
3572 case ACB_ADAPTER_TYPE_C:
3573 arcmsr_stop_hbc_bgrb(acb);
3574 arcmsr_flush_hbc_cache(acb);
3575 break;
3576 }
3577 /*
3578 * enable outbound Post Queue
3579 * enable outbound doorbell Interrupt
3580 */
3581 arcmsr_enable_allintr(acb, intmask_org);
3582 }
3583 }
3584
3585
3586 static uint8_t
3587 arcmsr_hba_wait_msgint_ready(struct ACB *acb)
3588 {
3589 uint32_t i;
3590 uint8_t retries = 0x00;
3591 struct HBA_msgUnit *phbamu;
3592
3593
3594 phbamu = (struct HBA_msgUnit *)acb->pmu;
3595
3596 do {
3597 for (i = 0; i < 100; i++) {
3598 if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3599 &phbamu->outbound_intstatus) &
3600 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
3601 /* clear interrupt */
3602 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3603 &phbamu->outbound_intstatus,
3604 ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
3605 return (TRUE);
3606 }
3607 drv_usecwait(10000);
3608 if (ddi_in_panic()) {
3609 /* clear interrupts */
3610 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3611 &phbamu->outbound_intstatus,
3612 ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
3613 return (TRUE);
3614 }
3615 } /* max 1 second */
3616 } while (retries++ < 20); /* max 20 seconds */
3617 return (FALSE);
3618 }
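/*
 * Editor's note -- timing sketch: this routine and its hbb/hbc siblings
 * below share one bounded-polling shape.  The inner loop polls 100 times
 * with drv_usecwait(10000) (10 ms), about 1 second per pass; the outer
 * loop allows 20 passes, so the overall ceiling is roughly 20 seconds.
 */
#if 0	/* sketch only; check_done() and ack() are hypothetical */
	uint32_t i;
	uint8_t retries = 0;

	do {
		for (i = 0; i < 100; i++) {	/* ~1 second per pass */
			if (check_done()) {
				ack();			/* clear the int */
				return (TRUE);
			}
			drv_usecwait(10000);		/* 10 ms */
		}
	} while (retries++ < 20);			/* ~20 seconds */
	return (FALSE);
#endif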
3619
3620
3621 static uint8_t
3622 arcmsr_hbb_wait_msgint_ready(struct ACB *acb)
3623 {
3624 struct HBB_msgUnit *phbbmu;
3625 uint32_t i;
3626 uint8_t retries = 0x00;
3627
3628 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3629
3630 do {
3631 for (i = 0; i < 100; i++) {
3632 if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3633 &phbbmu->hbb_doorbell->iop2drv_doorbell) &
3634 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
3635 /* clear interrupt */
3636 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3637 &phbbmu->hbb_doorbell->iop2drv_doorbell,
3638 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
3639 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3640 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3641 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3642 return (TRUE);
3643 }
3644 drv_usecwait(10000);
3645 if (ddi_in_panic()) {
3646 /* clear interrupts */
3647 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3648 &phbbmu->hbb_doorbell->iop2drv_doorbell,
3649 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
3650 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3651 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3652 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3653 return (TRUE);
3654 }
3655 } /* max 1 second */
3656 } while (retries++ < 20); /* max 20 seconds */
3657
3658 return (FALSE);
3659 }
3660
3661
3662 static uint8_t
3663 arcmsr_hbc_wait_msgint_ready(struct ACB *acb)
3664 {
3665 uint32_t i;
3666 uint8_t retries = 0x00;
3667 struct HBC_msgUnit *phbcmu;
3668 uint32_t c = ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR;
3669
3670
3671 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3672
3673 do {
3674 for (i = 0; i < 100; i++) {
3675 if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3676 &phbcmu->outbound_doorbell) &
3677 ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
3678 /* clear interrupt */
3679 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3680 &phbcmu->outbound_doorbell_clear, c);
3681 return (TRUE);
3682 }
3683 drv_usecwait(10000);
3684 if (ddi_in_panic()) {
3685 /* clear interrupts */
3686 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3687 &phbcmu->outbound_doorbell_clear, c);
3688 return (TRUE);
3689 }
3690 } /* max 1 second */
3691 } while (retries++ < 20); /* max 20 seconds */
3692 return (FALSE);
3693 }
3694
3695 static void
3696 arcmsr_flush_hba_cache(struct ACB *acb) {
3697
3698 struct HBA_msgUnit *phbamu;
3699 int retry_count = 30;
3700
3701 	/* allow up to 10 minutes (30 retries x 20 s) for the cache flush */
3702
3703 phbamu = (struct HBA_msgUnit *)acb->pmu;
3704
3705 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3706 ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3707 do {
3708 if (arcmsr_hba_wait_msgint_ready(acb)) {
3709 break;
3710 } else {
3711 retry_count--;
3712 }
3713 } while (retry_count != 0);
3714 }
3715
3716
3717
3718 static void
3719 arcmsr_flush_hbb_cache(struct ACB *acb) {
3720
3721 struct HBB_msgUnit *phbbmu;
3722 int retry_count = 30;
3723
3724 	/* allow up to 10 minutes (30 retries x 20 s) for the cache flush */
3725
3726 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3727 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3728 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3729 ARCMSR_MESSAGE_FLUSH_CACHE);
3730 do {
3731 if (arcmsr_hbb_wait_msgint_ready(acb)) {
3732 break;
3733 } else {
3734 retry_count--;
3735 }
3736 } while (retry_count != 0);
3737 }
3738
3739
3740 static void
3741 arcmsr_flush_hbc_cache(struct ACB *acb)
3742 {
3743 struct HBC_msgUnit *phbcmu;
3744 int retry_count = 30;
3745
3746 	/* allow up to 10 minutes (30 retries x 20 s) for the cache flush */
3747
3748 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3749
3750 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3751 ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3752 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3753 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3754 do {
3755 if (arcmsr_hbc_wait_msgint_ready(acb)) {
3756 break;
3757 } else {
3758 retry_count--;
3759 }
3760 } while (retry_count != 0);
3761 }
3762
3763
3764
3765 static uint8_t
3766 arcmsr_abort_hba_allcmd(struct ACB *acb)
3767 {
3768 struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
3769
3770 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3771 ARCMSR_INBOUND_MESG0_ABORT_CMD);
3772
3773 if (!arcmsr_hba_wait_msgint_ready(acb)) {
3774 arcmsr_warn(acb,
3775 "timeout while waiting for 'abort all "
3776 "outstanding commands'");
3777 return (0xff);
3778 }
3779 return (0x00);
3780 }
3781
3782
3783
3784 static uint8_t
3785 arcmsr_abort_hbb_allcmd(struct ACB *acb)
3786 {
3787 struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
3788
3789 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3790 &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
3791
3792 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
3793 arcmsr_warn(acb,
3794 "timeout while waiting for 'abort all "
3795 "outstanding commands'");
3796 		return (0xff);	/* match the hba/hbc variants: 0xff on timeout */
3797 }
3798 return (0x00);
3799 }
3800
3801
3802 static uint8_t
3803 arcmsr_abort_hbc_allcmd(struct ACB *acb)
3804 {
3805 struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
3806
3807 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3808 ARCMSR_INBOUND_MESG0_ABORT_CMD);
3809 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3810 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3811
3812 if (!arcmsr_hbc_wait_msgint_ready(acb)) {
3813 arcmsr_warn(acb,
3814 "timeout while waiting for 'abort all "
3815 "outstanding commands'");
3816 return (0xff);
3817 }
3818 return (0x00);
3819 }
3820
3821
3822 static void
3823 arcmsr_done4abort_postqueue(struct ACB *acb)
3824 {
3825
3826 struct CCB *ccb;
3827 uint32_t flag_ccb;
3828 int i = 0;
3829 boolean_t error;
3830
3831 switch (acb->adapter_type) {
3832 case ACB_ADAPTER_TYPE_A:
3833 {
3834 struct HBA_msgUnit *phbamu;
3835 uint32_t outbound_intstatus;
3836
3837 phbamu = (struct HBA_msgUnit *)acb->pmu;
3838 /* clear and abort all outbound posted Q */
3839 outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3840 &phbamu->outbound_intstatus) & acb->outbound_int_enable;
3841 /* clear interrupt */
3842 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3843 &phbamu->outbound_intstatus, outbound_intstatus);
3844 while (((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3845 &phbamu->outbound_queueport)) != 0xFFFFFFFF) &&
3846 (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
3847 /* frame must be 32 bytes aligned */
3848 /* the CDB is the first field of the CCB */
3849 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
3850 /* check if command done with no error */
3851 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
3852 B_TRUE : B_FALSE;
3853 arcmsr_drain_donequeue(acb, ccb, error);
3854 }
3855 break;
3856 }
3857
3858 case ACB_ADAPTER_TYPE_B:
3859 {
3860 struct HBB_msgUnit *phbbmu;
3861
3862 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3863 /* clear all outbound posted Q */
3864 /* clear doorbell interrupt */
3865 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3866 &phbbmu->hbb_doorbell->iop2drv_doorbell,
3867 ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
3868 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
3869 if ((flag_ccb = phbbmu->done_qbuffer[i]) != 0) {
3870 phbbmu->done_qbuffer[i] = 0;
3871 /* frame must be 32 bytes aligned */
3872 ccb = NumToPtr((acb->vir2phy_offset +
3873 (flag_ccb << 5)));
3874 /* check if command done with no error */
3875 error =
3876 (flag_ccb &
3877 ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
3878 B_TRUE : B_FALSE;
3879 arcmsr_drain_donequeue(acb, ccb, error);
3880 }
3881 phbbmu->post_qbuffer[i] = 0;
3882 } /* drain reply FIFO */
3883 phbbmu->doneq_index = 0;
3884 phbbmu->postq_index = 0;
3885 break;
3886 }
3887
3888 case ACB_ADAPTER_TYPE_C:
3889 {
3890 struct HBC_msgUnit *phbcmu;
3891 uint32_t ccb_cdb_phy;
3892
3893 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3894 while ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3895 &phbcmu->host_int_status) &
3896 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) &&
3897 (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
3898 			/* pop one completion token from the outbound queue */
3899 flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3900 &phbcmu->outbound_queueport_low);
3901 /* frame must be 32 bytes aligned */
3902 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3903 ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
3904 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)?
3905 B_TRUE : B_FALSE;
3906 arcmsr_drain_donequeue(acb, ccb, error);
3907 }
3908 break;
3909 }
3910
3911 }
3912 }
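/*
 * Editor's note -- how a completion token becomes a CCB pointer, as used
 * above: CCB frames are 32-byte aligned, so a type A/B token carries the
 * frame's physical address divided by 32 and is rebuilt with '<< 5'; a
 * type C token carries the physical address directly with status flags
 * in the low four bits, hence the '& 0xFFFFFFF0' mask.  Adding
 * acb->vir2phy_offset maps the physical address back into the driver's
 * virtual view of the CCB pool.
 */
#if 0	/* sketch only */
	/* type A/B: token == paddr >> 5 */
	ccb = NumToPtr(acb->vir2phy_offset + (flag_ccb << 5));
	/* type C: low nibble is status, the rest is paddr */
	ccb = NumToPtr(acb->vir2phy_offset + (flag_ccb & 0xFFFFFFF0));
#endif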
3913 /*
3914  * Routine Description: try to get an echo from the iop.
3915  * Arguments: acb
3916  * Return Value: 0 if the iop answers, 0xFF on timeout.
3917  */
3918 static uint8_t
3919 arcmsr_get_echo_from_iop(struct ACB *acb)
3920 {
3921 uint32_t intmask_org;
3922 uint8_t rtnval = 0;
3923
3924 if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3925 struct HBA_msgUnit *phbamu;
3926
3927 phbamu = (struct HBA_msgUnit *)acb->pmu;
3928 intmask_org = arcmsr_disable_allintr(acb);
3929 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3930 &phbamu->inbound_msgaddr0,
3931 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3932 if (!arcmsr_hba_wait_msgint_ready(acb)) {
3933 			arcmsr_warn(acb, "timed out while trying to get "
3934 			    "an echo from the iop");
3935 acb->acb_flags |= ACB_F_BUS_HANG_ON;
3936 rtnval = 0xFF;
3937 }
3938 /* enable all outbound interrupt */
3939 arcmsr_enable_allintr(acb, intmask_org);
3940 }
3941 return (rtnval);
3942 }
3943
3944 /*
3945  * Routine Description: reset the 80331 iop.
3946  * Arguments: acb
3947  * Return Value: 0 on success, 0xFF if the reset/abort failed.
3948  */
3949 static uint8_t
3950 arcmsr_iop_reset(struct ACB *acb)
3951 {
3952 struct CCB *ccb;
3953 uint32_t intmask_org;
3954 uint8_t rtnval = 0;
3955 int i = 0;
3956
3957 if (acb->ccboutstandingcount > 0) {
3958 /* disable all outbound interrupt */
3959 intmask_org = arcmsr_disable_allintr(acb);
3960 /* clear and abort all outbound posted Q */
3961 arcmsr_done4abort_postqueue(acb);
3962 		/* tell the iop 331 that outstanding commands were aborted */
3963 rtnval = (acb->acb_flags & ACB_F_BUS_HANG_ON) ?
3964 0xFF : arcmsr_abort_host_command(acb);
3965
3966 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3967 ccb = acb->pccb_pool[i];
3968 if (ccb->ccb_state == ARCMSR_CCB_START) {
3969 /* ccb->ccb_state = ARCMSR_CCB_RESET; */
3970 ccb->pkt->pkt_reason = CMD_RESET;
3971 ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
3972 arcmsr_ccb_complete(ccb, 1);
3973 }
3974 }
3975 atomic_and_32(&acb->ccboutstandingcount, 0);
3976 /* enable all outbound interrupt */
3977 arcmsr_enable_allintr(acb, intmask_org);
3978 } else {
3979 rtnval = arcmsr_get_echo_from_iop(acb);
3980 }
3981 return (rtnval);
3982 }
3983
3984
3985 static struct QBUFFER *
3986 arcmsr_get_iop_rqbuffer(struct ACB *acb)
3987 {
3988 struct QBUFFER *qb;
3989
3990 switch (acb->adapter_type) {
3991 case ACB_ADAPTER_TYPE_A:
3992 {
3993 struct HBA_msgUnit *phbamu;
3994
3995 phbamu = (struct HBA_msgUnit *)acb->pmu;
3996 qb = (struct QBUFFER *)&phbamu->message_rbuffer;
3997 break;
3998 }
3999
4000 case ACB_ADAPTER_TYPE_B:
4001 {
4002 struct HBB_msgUnit *phbbmu;
4003
4004 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4005 qb = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
4006 break;
4007 }
4008
4009 case ACB_ADAPTER_TYPE_C:
4010 {
4011 struct HBC_msgUnit *phbcmu;
4012
4013 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4014 qb = (struct QBUFFER *)&phbcmu->message_rbuffer;
4015 break;
4016 }
4017
4018 }
4019 return (qb);
4020 }
4021
4022
4023 static struct QBUFFER *
4024 arcmsr_get_iop_wqbuffer(struct ACB *acb)
4025 {
4026 struct QBUFFER *qbuffer = NULL;
4027
4028 switch (acb->adapter_type) {
4029 case ACB_ADAPTER_TYPE_A:
4030 {
4031 struct HBA_msgUnit *phbamu;
4032
4033 phbamu = (struct HBA_msgUnit *)acb->pmu;
4034 qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
4035 break;
4036 }
4037
4038 case ACB_ADAPTER_TYPE_B:
4039 {
4040 struct HBB_msgUnit *phbbmu;
4041
4042 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4043 qbuffer = (struct QBUFFER *)
4044 &phbbmu->hbb_rwbuffer->message_wbuffer;
4045 break;
4046 }
4047
4048 case ACB_ADAPTER_TYPE_C:
4049 {
4050 struct HBC_msgUnit *phbcmu;
4051
4052 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4053 qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
4054 break;
4055 }
4056
4057 }
4058 return (qbuffer);
4059 }
4060
4061
4062
4063 static void
4064 arcmsr_iop_message_read(struct ACB *acb)
4065 {
4066 switch (acb->adapter_type) {
4067 case ACB_ADAPTER_TYPE_A:
4068 {
4069 struct HBA_msgUnit *phbamu;
4070
4071 phbamu = (struct HBA_msgUnit *)acb->pmu;
4072 /* let IOP know the data has been read */
4073 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4074 &phbamu->inbound_doorbell,
4075 ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
4076 break;
4077 }
4078
4079 case ACB_ADAPTER_TYPE_B:
4080 {
4081 struct HBB_msgUnit *phbbmu;
4082
4083 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4084 /* let IOP know the data has been read */
4085 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4086 &phbbmu->hbb_doorbell->drv2iop_doorbell,
4087 ARCMSR_DRV2IOP_DATA_READ_OK);
4088 break;
4089 }
4090
4091 case ACB_ADAPTER_TYPE_C:
4092 {
4093 struct HBC_msgUnit *phbcmu;
4094
4095 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4096 /* let IOP know data has been read */
4097 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4098 &phbcmu->inbound_doorbell,
4099 ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
4100 break;
4101 }
4102
4103 }
4104 }
4105
4106
4107
4108 static void
4109 arcmsr_iop_message_wrote(struct ACB *acb)
4110 {
4111 switch (acb->adapter_type) {
4112 case ACB_ADAPTER_TYPE_A: {
4113 struct HBA_msgUnit *phbamu;
4114
4115 phbamu = (struct HBA_msgUnit *)acb->pmu;
4116 /*
4117 		 * ring the inbound doorbell to tell the iop the driver data was
4118 		 * written ok; await the reply hwinterrupt for the next Qbuffer post
4119 */
4120 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4121 &phbamu->inbound_doorbell,
4122 ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
4123 break;
4124 }
4125
4126 case ACB_ADAPTER_TYPE_B:
4127 {
4128 struct HBB_msgUnit *phbbmu;
4129
4130 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4131 /*
4132 		 * ring the inbound doorbell to tell the iop the driver data was
4133 		 * written successfully, then await the reply on the next
4134 		 * hwinterrupt for the next Qbuffer post
4135 */
4136 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4137 &phbbmu->hbb_doorbell->drv2iop_doorbell,
4138 ARCMSR_DRV2IOP_DATA_WRITE_OK);
4139 break;
4140 }
4141
4142 case ACB_ADAPTER_TYPE_C:
4143 {
4144 struct HBC_msgUnit *phbcmu;
4145
4146 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4147 /*
4148 		 * ring the inbound doorbell to tell the iop the driver data was
4149 		 * written ok; await the reply hwinterrupt for the next Qbuffer post
4150 */
4151 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4152 &phbcmu->inbound_doorbell,
4153 ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
4154 break;
4155 }
4156
4157 }
4158 }
4159
4160
4161
4162 static void
4163 arcmsr_post_ioctldata2iop(struct ACB *acb)
4164 {
4165 uint8_t *pQbuffer;
4166 struct QBUFFER *pwbuffer;
4167 uint8_t *iop_data;
4168 int32_t allxfer_len = 0;
4169
4170 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
4171 iop_data = (uint8_t *)pwbuffer->data;
4172 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
4173 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
4174 while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
4175 (allxfer_len < 124)) {
4176 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
4177 (void) memcpy(iop_data, pQbuffer, 1);
4178 acb->wqbuf_firstidx++;
4179 			/* wrap the index at the end of the ring */
4180 acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4181 iop_data++;
4182 allxfer_len++;
4183 }
4184 pwbuffer->data_len = allxfer_len;
4185 /*
4186 * push inbound doorbell and wait reply at hwinterrupt
4187 * routine for next Qbuffer post
4188 */
4189 arcmsr_iop_message_wrote(acb);
4190 }
4191 }
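/*
 * Editor's note -- the wqbuffer above is a plain byte ring:
 * wqbuf_firstidx is the consumer index, wqbuf_lastidx the producer
 * index, and both wrap modulo ARCMSR_MAX_QBUFFER.  At most 124 bytes
 * are shipped per post, which appears to be the per-exchange payload
 * limit of the firmware's QBUFFER.  The generic drain shape:
 */
#if 0	/* sketch only; ring/dst/first/last/limit are hypothetical names */
	int32_t n = 0;

	while (first != last && n < limit) {
		*dst++ = ring[first];
		first = (first + 1) % ARCMSR_MAX_QBUFFER;
		n++;
	}
#endif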
4192
4193
4194
4195 static void
4196 arcmsr_stop_hba_bgrb(struct ACB *acb)
4197 {
4198 struct HBA_msgUnit *phbamu;
4199
4200 phbamu = (struct HBA_msgUnit *)acb->pmu;
4201
4202 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4203 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4204 &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4205 if (!arcmsr_hba_wait_msgint_ready(acb))
4206 arcmsr_warn(acb,
4207 "timeout while waiting for background rebuild completion");
4208 }
4209
4210
4211 static void
4212 arcmsr_stop_hbb_bgrb(struct ACB *acb)
4213 {
4214 struct HBB_msgUnit *phbbmu;
4215
4216 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4217
4218 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4219 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4220 &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
4221
4222 if (!arcmsr_hbb_wait_msgint_ready(acb))
4223 arcmsr_warn(acb,
4224 "timeout while waiting for background rebuild completion");
4225 }
4226
4227
4228 static void
4229 arcmsr_stop_hbc_bgrb(struct ACB *acb)
4230 {
4231 struct HBC_msgUnit *phbcmu;
4232
4233 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4234
4235 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4236 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4237 &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4238 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4239 &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4240 if (!arcmsr_hbc_wait_msgint_ready(acb))
4241 arcmsr_warn(acb,
4242 "timeout while waiting for background rebuild completion");
4243 }
4244
4245
4246 static int
4247 arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt)
4248 {
4249 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
4250 struct CCB *ccb = pkt->pkt_ha_private;
4251 struct buf *bp = ccb->bp;
4252 uint8_t *pQbuffer;
4253 int retvalue = 0, transfer_len = 0;
4254 char *buffer;
4255 uint32_t controlcode;
4256
4257
4258 /* 4 bytes: Areca io control code */
4259 controlcode =
4260 (uint32_t)pkt->pkt_cdbp[5] << 24 |
4261 (uint32_t)pkt->pkt_cdbp[6] << 16 |
4262 (uint32_t)pkt->pkt_cdbp[7] << 8 |
4263 (uint32_t)pkt->pkt_cdbp[8];
4264
4265 if (bp->b_flags & (B_PHYS | B_PAGEIO))
4266 bp_mapin(bp);
4267
4268 buffer = bp->b_un.b_addr;
4269 transfer_len = bp->b_bcount;
4270 if (transfer_len > sizeof (struct CMD_MESSAGE_FIELD)) {
4271 retvalue = ARCMSR_MESSAGE_FAIL;
4272 goto message_out;
4273 }
4274
4275 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)(intptr_t)buffer;
4276 switch (controlcode) {
4277 case ARCMSR_MESSAGE_READ_RQBUFFER:
4278 {
4279 unsigned long *ver_addr;
4280 uint8_t *ptmpQbuffer;
4281 int32_t allxfer_len = 0;
4282
4283 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
4284
4285 ptmpQbuffer = (uint8_t *)ver_addr;
4286 while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
4287 (allxfer_len < (MSGDATABUFLEN - 1))) {
4288 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
4289 (void) memcpy(ptmpQbuffer, pQbuffer, 1);
4290 acb->rqbuf_firstidx++;
4291 acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4292 ptmpQbuffer++;
4293 allxfer_len++;
4294 }
4295
4296 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4297 struct QBUFFER *prbuffer;
4298 uint8_t *iop_data;
4299 int32_t iop_len;
4300
4301 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4302 prbuffer = arcmsr_get_iop_rqbuffer(acb);
4303 iop_data = (uint8_t *)prbuffer->data;
4304 iop_len = (int32_t)prbuffer->data_len;
4305
4306 while (iop_len > 0) {
4307 pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
4308 (void) memcpy(pQbuffer, iop_data, 1);
4309 acb->rqbuf_lastidx++;
4310 acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
4311 iop_data++;
4312 iop_len--;
4313 }
4314 arcmsr_iop_message_read(acb);
4315 }
4316
4317 (void) memcpy(pcmdmessagefld->messagedatabuffer,
4318 (uint8_t *)ver_addr, allxfer_len);
4319 pcmdmessagefld->cmdmessage.Length = allxfer_len;
4320 pcmdmessagefld->cmdmessage.ReturnCode =
4321 ARCMSR_MESSAGE_RETURNCODE_OK;
4322 kmem_free(ver_addr, MSGDATABUFLEN);
4323 break;
4324 }
4325
4326 case ARCMSR_MESSAGE_WRITE_WQBUFFER:
4327 {
4328 uint8_t *ver_addr;
4329 int32_t my_empty_len, user_len, wqbuf_firstidx,
4330 wqbuf_lastidx;
4331 uint8_t *ptmpuserbuffer;
4332
4333 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
4334
4335 ptmpuserbuffer = ver_addr;
4336 user_len = min(pcmdmessagefld->cmdmessage.Length,
4337 MSGDATABUFLEN);
4338 (void) memcpy(ptmpuserbuffer,
4339 pcmdmessagefld->messagedatabuffer, user_len);
4340 wqbuf_lastidx = acb->wqbuf_lastidx;
4341 wqbuf_firstidx = acb->wqbuf_firstidx;
4342 if (wqbuf_lastidx != wqbuf_firstidx) {
4343 struct scsi_arq_status *arq_status;
4344
4345 arcmsr_post_ioctldata2iop(acb);
4346 arq_status = (struct scsi_arq_status *)
4347 (intptr_t)(pkt->pkt_scbp);
4348 bzero((caddr_t)arq_status,
4349 sizeof (struct scsi_arq_status));
4350 arq_status->sts_rqpkt_reason = CMD_CMPLT;
4351 arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
4352 STATE_GOT_TARGET | STATE_SENT_CMD |
4353 STATE_XFERRED_DATA | STATE_GOT_STATUS);
4354
4355 arq_status->sts_rqpkt_statistics =
4356 pkt->pkt_statistics;
4357 arq_status->sts_rqpkt_resid = 0;
4358 			{	/* sts_sensedata is embedded, never NULL */
4359 struct scsi_extended_sense *sts_sensedata;
4360
4361 sts_sensedata = &arq_status->sts_sensedata;
4362
4363 /* has error report sensedata */
4364 sts_sensedata->es_code = 0x0;
4365 sts_sensedata->es_valid = 0x01;
4366 sts_sensedata->es_key = KEY_ILLEGAL_REQUEST;
4367 /* AdditionalSenseLength */
4368 sts_sensedata->es_add_len = 0x0A;
4369 /* AdditionalSenseCode */
4370 sts_sensedata->es_add_code = 0x20;
4371 }
4372 retvalue = ARCMSR_MESSAGE_FAIL;
4373 } else {
4374 my_empty_len = (wqbuf_firstidx-wqbuf_lastidx - 1) &
4375 (ARCMSR_MAX_QBUFFER - 1);
4376 if (my_empty_len >= user_len) {
4377 while (user_len > 0) {
4378 pQbuffer = &acb->wqbuffer[
4379 acb->wqbuf_lastidx];
4380 (void) memcpy(pQbuffer,
4381 ptmpuserbuffer, 1);
4382 acb->wqbuf_lastidx++;
4383 acb->wqbuf_lastidx %=
4384 ARCMSR_MAX_QBUFFER;
4385 ptmpuserbuffer++;
4386 user_len--;
4387 }
4388 if (acb->acb_flags &
4389 ACB_F_MESSAGE_WQBUFFER_CLEARED) {
4390 acb->acb_flags &=
4391 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
4392 arcmsr_post_ioctldata2iop(acb);
4393 }
4394 } else {
4395 struct scsi_arq_status *arq_status;
4396
4397 /* has error report sensedata */
4398 arq_status = (struct scsi_arq_status *)
4399 (intptr_t)(pkt->pkt_scbp);
4400 bzero((caddr_t)arq_status,
4401 sizeof (struct scsi_arq_status));
4402 arq_status->sts_rqpkt_reason = CMD_CMPLT;
4403 arq_status->sts_rqpkt_state =
4404 (STATE_GOT_BUS |
4405 				    STATE_GOT_TARGET | STATE_SENT_CMD |
4406 STATE_XFERRED_DATA | STATE_GOT_STATUS);
4407 arq_status->sts_rqpkt_statistics =
4408 pkt->pkt_statistics;
4409 arq_status->sts_rqpkt_resid = 0;
4410 				{	/* sts_sensedata is embedded, never NULL */
4411 					struct scsi_extended_sense
4412 					    *sts_sensedata;
4413
4414 sts_sensedata =
4415 &arq_status->sts_sensedata;
4416
4417 /* has error report sensedata */
4418 sts_sensedata->es_code = 0x0;
4419 sts_sensedata->es_valid = 0x01;
4420 sts_sensedata->es_key =
4421 KEY_ILLEGAL_REQUEST;
4422 /* AdditionalSenseLength */
4423 sts_sensedata->es_add_len = 0x0A;
4424 /* AdditionalSenseCode */
4425 sts_sensedata->es_add_code = 0x20;
4426 }
4427 retvalue = ARCMSR_MESSAGE_FAIL;
4428 }
4429 }
4430 kmem_free(ver_addr, MSGDATABUFLEN);
4431 break;
4432 }
4433
4434 case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
4435 pQbuffer = acb->rqbuffer;
4436
4437 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4438 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4439 arcmsr_iop_message_read(acb);
4440 }
4441 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
4442 acb->rqbuf_firstidx = 0;
4443 acb->rqbuf_lastidx = 0;
4444 (void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
4445 pcmdmessagefld->cmdmessage.ReturnCode =
4446 ARCMSR_MESSAGE_RETURNCODE_OK;
4447 break;
4448 case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
4449 pQbuffer = acb->wqbuffer;
4450
4451 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4452 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4453 arcmsr_iop_message_read(acb);
4454 }
4455 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4456 ACB_F_MESSAGE_WQBUFFER_READ);
4457 acb->wqbuf_firstidx = 0;
4458 acb->wqbuf_lastidx = 0;
4459 (void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
4460 pcmdmessagefld->cmdmessage.ReturnCode =
4461 ARCMSR_MESSAGE_RETURNCODE_OK;
4462 break;
4463 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
4464
4465 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4466 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4467 arcmsr_iop_message_read(acb);
4468 }
4469 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4470 ACB_F_MESSAGE_RQBUFFER_CLEARED |
4471 ACB_F_MESSAGE_WQBUFFER_READ);
4472 acb->rqbuf_firstidx = 0;
4473 acb->rqbuf_lastidx = 0;
4474 acb->wqbuf_firstidx = 0;
4475 acb->wqbuf_lastidx = 0;
4476 pQbuffer = acb->rqbuffer;
4477 (void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
4478 pQbuffer = acb->wqbuffer;
4479 (void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
4480 pcmdmessagefld->cmdmessage.ReturnCode =
4481 ARCMSR_MESSAGE_RETURNCODE_OK;
4482 break;
4483
4484 case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
4485 pcmdmessagefld->cmdmessage.ReturnCode =
4486 ARCMSR_MESSAGE_RETURNCODE_3F;
4487 break;
4488 /*
4489 * Not supported - ARCMSR_MESSAGE_SAY_HELLO
4490 */
4491 case ARCMSR_MESSAGE_SAY_GOODBYE:
4492 arcmsr_iop_parking(acb);
4493 break;
4494 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
4495 switch (acb->adapter_type) {
4496 case ACB_ADAPTER_TYPE_A:
4497 arcmsr_flush_hba_cache(acb);
4498 break;
4499 case ACB_ADAPTER_TYPE_B:
4500 arcmsr_flush_hbb_cache(acb);
4501 break;
4502 case ACB_ADAPTER_TYPE_C:
4503 arcmsr_flush_hbc_cache(acb);
4504 break;
4505 }
4506 break;
4507 default:
4508 retvalue = ARCMSR_MESSAGE_FAIL;
4509 }
4510
4511 message_out:
4512
4513 return (retvalue);
4514 }
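/*
 * Editor's note -- worked example for the free-space test in the
 * WRITE_WQBUFFER case above.  Assuming ARCMSR_MAX_QBUFFER is a power of
 * two (the '&' mask requires it), free = (first - last - 1) & (SIZE - 1)
 * reserves one slot so that first == last unambiguously means "empty".
 * With SIZE = 4096, first = 10, last = 20: (10 - 20 - 1) & 4095 = 4085
 * bytes free; when first == last the result is SIZE - 1, the maximum.
 */
#if 0	/* sketch only */
static int32_t
ring_free_bytes(int32_t first, int32_t last)
{
	return ((first - last - 1) & (ARCMSR_MAX_QBUFFER - 1));
}
#endif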
4515
4516
4517
4518
4519 static void
4520 arcmsr_pcidev_disattach(struct ACB *acb)
4521 {
4522 struct CCB *ccb;
4523 int i = 0;
4524
4525 /* disable all outbound interrupts */
4526 (void) arcmsr_disable_allintr(acb);
4527 /* stop adapter background rebuild */
4528 switch (acb->adapter_type) {
4529 case ACB_ADAPTER_TYPE_A:
4530 arcmsr_stop_hba_bgrb(acb);
4531 arcmsr_flush_hba_cache(acb);
4532 break;
4533 case ACB_ADAPTER_TYPE_B:
4534 arcmsr_stop_hbb_bgrb(acb);
4535 arcmsr_flush_hbb_cache(acb);
4536 break;
4537 case ACB_ADAPTER_TYPE_C:
4538 arcmsr_stop_hbc_bgrb(acb);
4539 arcmsr_flush_hbc_cache(acb);
4540 break;
4541 }
4542 /* abort all outstanding commands */
4543 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
4544 acb->acb_flags &= ~ACB_F_IOP_INITED;
4545
4546 if (acb->ccboutstandingcount != 0) {
4547 /* clear and abort all outbound posted Q */
4548 arcmsr_done4abort_postqueue(acb);
4549 		/* tell the iop that outstanding commands were aborted */
4550 (void) arcmsr_abort_host_command(acb);
4551
4552 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
4553 ccb = acb->pccb_pool[i];
4554 if (ccb->ccb_state == ARCMSR_CCB_START) {
4555 /* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
4556 ccb->pkt->pkt_reason = CMD_ABORTED;
4557 ccb->pkt->pkt_statistics |= STAT_ABORTED;
4558 arcmsr_ccb_complete(ccb, 1);
4559 }
4560 }
4561 }
4562 }
4563
4564 /* get firmware miscellaneous data */
4565 static void
4566 arcmsr_get_hba_config(struct ACB *acb)
4567 {
4568 struct HBA_msgUnit *phbamu;
4569
4570 char *acb_firm_model;
4571 char *acb_firm_version;
4572 char *acb_device_map;
4573 char *iop_firm_model;
4574 char *iop_firm_version;
4575 char *iop_device_map;
4576 int count;
4577
4578 phbamu = (struct HBA_msgUnit *)acb->pmu;
4579 acb_firm_model = acb->firm_model;
4580 acb_firm_version = acb->firm_version;
4581 acb_device_map = acb->device_map;
4582 /* firm_model, 15 */
4583 iop_firm_model =
4584 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4585 /* firm_version, 17 */
4586 iop_firm_version =
4587 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4588
4589 /* device_map, 21 */
4590 iop_device_map =
4591 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4592
4593 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4594 &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4595
4596 if (!arcmsr_hba_wait_msgint_ready(acb))
4597 arcmsr_warn(acb,
4598 "timeout while waiting for adapter firmware "
4599 "miscellaneous data");
4600
4601 count = 8;
4602 while (count) {
4603 *acb_firm_model = CHIP_REG_READ8(acb->reg_mu_acc_handle0,
4604 iop_firm_model);
4605 acb_firm_model++;
4606 iop_firm_model++;
4607 count--;
4608 }
4609
4610 count = 16;
4611 while (count) {
4612 *acb_firm_version =
4613 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4614 acb_firm_version++;
4615 iop_firm_version++;
4616 count--;
4617 }
4618
4619 count = 16;
4620 while (count) {
4621 *acb_device_map =
4622 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4623 acb_device_map++;
4624 iop_device_map++;
4625 count--;
4626 }
4627
4628 arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4629 acb->firm_version);
4630
4631 /* firm_request_len, 1 */
4632 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4633 &phbamu->msgcode_rwbuffer[1]);
4634 /* firm_numbers_queue, 2 */
4635 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4636 &phbamu->msgcode_rwbuffer[2]);
4637 /* firm_sdram_size, 3 */
4638 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4639 &phbamu->msgcode_rwbuffer[3]);
4640 /* firm_ide_channels, 4 */
4641 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4642 &phbamu->msgcode_rwbuffer[4]);
4643 }
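/*
 * Editor's note -- GET_CONFIG reply layout, as implied by the word
 * offsets used in the three get_*_config routines (indices are into the
 * 32-bit msgcode_rwbuffer array; byte counts come from the copy loops):
 *
 *	word  0		signature (checked in the message ISRs below)
 *	word  1		firm_request_len
 *	word  2		firm_numbers_queue
 *	word  3		firm_sdram_size
 *	word  4		firm_ide_channels
 *	words 15-16	firmware model    (8 bytes)
 *	words 17-20	firmware version  (16 bytes)
 *	words 21-24	device map        (16 bytes)
 *	word  25	firm_cfg_version  (type C only)
 */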
4644
4645 /* get firmware miscellaneous data */
4646 static void
4647 arcmsr_get_hbb_config(struct ACB *acb)
4648 {
4649 struct HBB_msgUnit *phbbmu;
4650 char *acb_firm_model;
4651 char *acb_firm_version;
4652 char *acb_device_map;
4653 char *iop_firm_model;
4654 char *iop_firm_version;
4655 char *iop_device_map;
4656 int count;
4657
4658 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4659 acb_firm_model = acb->firm_model;
4660 acb_firm_version = acb->firm_version;
4661 acb_device_map = acb->device_map;
4662 /* firm_model, 15 */
4663 iop_firm_model = (char *)
4664 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4665 /* firm_version, 17 */
4666 iop_firm_version = (char *)
4667 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4668 /* device_map, 21 */
4669 iop_device_map = (char *)
4670 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4671
4672 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4673 &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
4674
4675 if (!arcmsr_hbb_wait_msgint_ready(acb))
4676 arcmsr_warn(acb,
4677 "timeout while waiting for adapter firmware "
4678 "miscellaneous data");
4679
4680 count = 8;
4681 while (count) {
4682 *acb_firm_model =
4683 CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_model);
4684 acb_firm_model++;
4685 iop_firm_model++;
4686 count--;
4687 }
4688 count = 16;
4689 while (count) {
4690 *acb_firm_version =
4691 CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_version);
4692 acb_firm_version++;
4693 iop_firm_version++;
4694 count--;
4695 }
4696 count = 16;
4697 while (count) {
4698 *acb_device_map =
4699 CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_device_map);
4700 acb_device_map++;
4701 iop_device_map++;
4702 count--;
4703 }
4704
4705 arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4706 acb->firm_version);
4707
4708 /* firm_request_len, 1 */
4709 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4710 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1]);
4711 /* firm_numbers_queue, 2 */
4712 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4713 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2]);
4714 /* firm_sdram_size, 3 */
4715 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4716 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3]);
4717 /* firm_ide_channels, 4 */
4718 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4719 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4]);
4720 }
4721
4722
4723 /* get firmware miscellaneous data */
4724 static void
4725 arcmsr_get_hbc_config(struct ACB *acb)
4726 {
4727 struct HBC_msgUnit *phbcmu;
4728
4729 char *acb_firm_model;
4730 char *acb_firm_version;
4731 char *acb_device_map;
4732 char *iop_firm_model;
4733 char *iop_firm_version;
4734 char *iop_device_map;
4735 int count;
4736
4737 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4738 acb_firm_model = acb->firm_model;
4739 acb_firm_version = acb->firm_version;
4740 acb_device_map = acb->device_map;
4741 /* firm_model, 15 */
4742 iop_firm_model =
4743 (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4744 /* firm_version, 17 */
4745 iop_firm_version =
4746 (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4747 /* device_map, 21 */
4748 iop_device_map =
4749 (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4750 /* post "get config" instruction */
4751 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4752 &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4753 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4754 &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4755 if (!arcmsr_hbc_wait_msgint_ready(acb))
4756 arcmsr_warn(acb,
4757 "timeout while waiting for adapter firmware "
4758 "miscellaneous data");
4759 count = 8;
4760 while (count) {
4761 *acb_firm_model =
4762 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_model);
4763 acb_firm_model++;
4764 iop_firm_model++;
4765 count--;
4766 }
4767
4768 count = 16;
4769 while (count) {
4770 *acb_firm_version =
4771 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4772 acb_firm_version++;
4773 iop_firm_version++;
4774 count--;
4775 }
4776
4777 count = 16;
4778 while (count) {
4779 *acb_device_map =
4780 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4781 acb_device_map++;
4782 iop_device_map++;
4783 count--;
4784 }
4785
4786 arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4787 acb->firm_version);
4788
4789 /* firm_request_len, 1, 04-07 */
4790 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4791 &phbcmu->msgcode_rwbuffer[1]);
4792 /* firm_numbers_queue, 2, 08-11 */
4793 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4794 &phbcmu->msgcode_rwbuffer[2]);
4795 /* firm_sdram_size, 3, 12-15 */
4796 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4797 &phbcmu->msgcode_rwbuffer[3]);
4798 /* firm_ide_channels, 4, 16-19 */
4799 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4800 &phbcmu->msgcode_rwbuffer[4]);
4801 /* firm_cfg_version, 25, 100-103 */
4802 acb->firm_cfg_version = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4803 &phbcmu->msgcode_rwbuffer[25]);
4804 }
4805
4806
4807 /* start background rebuild */
4808 static void
4809 arcmsr_start_hba_bgrb(struct ACB *acb) {
4810
4811 struct HBA_msgUnit *phbamu;
4812
4813 phbamu = (struct HBA_msgUnit *)acb->pmu;
4814
4815 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4816 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4817 &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4818
4819 if (!arcmsr_hba_wait_msgint_ready(acb))
4820 arcmsr_warn(acb,
4821 "timeout while waiting for background rebuild to start");
4822 }
4823
4824
4825 static void
4826 arcmsr_start_hbb_bgrb(struct ACB *acb) {
4827
4828 struct HBB_msgUnit *phbbmu;
4829
4830 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4831
4832 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4833 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4834 &phbbmu->hbb_doorbell->drv2iop_doorbell,
4835 ARCMSR_MESSAGE_START_BGRB);
4836
4837 if (!arcmsr_hbb_wait_msgint_ready(acb))
4838 arcmsr_warn(acb,
4839 "timeout while waiting for background rebuild to start");
4840 }
4841
4842
4843 static void
4844 arcmsr_start_hbc_bgrb(struct ACB *acb) {
4845
4846 struct HBC_msgUnit *phbcmu;
4847
4848 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4849
4850 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4851 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4852 &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4853 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4854 &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4855 if (!arcmsr_hbc_wait_msgint_ready(acb))
4856 arcmsr_warn(acb,
4857 "timeout while waiting for background rebuild to start");
4858 }
4859
4860 static void
4861 arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4862 {
4863 struct HBA_msgUnit *phbamu;
4864 struct CCB *ccb;
4865 boolean_t error;
4866 uint32_t flag_ccb, outbound_intstatus, intmask_org;
4867 boolean_t poll_ccb_done = B_FALSE;
4868 uint32_t poll_count = 0;
4869
4870
4871 phbamu = (struct HBA_msgUnit *)acb->pmu;
4872
4873 polling_ccb_retry:
4874 /* TODO: Use correct offset and size for syncing? */
4875 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4876 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4877 return;
4878 intmask_org = arcmsr_disable_allintr(acb);
4879
4880 for (;;) {
4881 if ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4882 &phbamu->outbound_queueport)) == 0xFFFFFFFF) {
4883 if (poll_ccb_done) {
4884 				/* no more completed ccbs in the chip FIFO */
4885 break;
4886 } else {
4887 drv_usecwait(25000);
4888 if ((poll_count > 100) && (poll_ccb != NULL)) {
4889 break;
4890 }
4891 if (acb->ccboutstandingcount == 0) {
4892 break;
4893 }
4894 poll_count++;
4895 outbound_intstatus =
4896 CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4897 &phbamu->outbound_intstatus) &
4898 acb->outbound_int_enable;
4899
4900 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4901 &phbamu->outbound_intstatus,
4902 outbound_intstatus); /* clear interrupt */
4903 }
4904 }
4905
4906 /* frame must be 32 bytes aligned */
4907 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4908
4909 /* check if command done with no error */
4910 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4911 B_TRUE : B_FALSE;
4912 if (poll_ccb != NULL)
4913 poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4914
4915 if (ccb->acb != acb) {
4916 			arcmsr_warn(acb, "ccb came back with the wrong acb!");
4917 continue;
4918 }
4919 if (ccb->ccb_state != ARCMSR_CCB_START) {
4920 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
4921 ccb->ccb_state |= ARCMSR_CCB_BACK;
4922 ccb->pkt->pkt_reason = CMD_ABORTED;
4923 ccb->pkt->pkt_statistics |= STAT_ABORTED;
4924 arcmsr_ccb_complete(ccb, 1);
4925 continue;
4926 }
4927 arcmsr_report_ccb_state(acb, ccb, error);
4928 arcmsr_warn(acb,
4929 "polling op got unexpected ccb command done");
4930 continue;
4931 }
4932 arcmsr_report_ccb_state(acb, ccb, error);
4933 } /* drain reply FIFO */
4934 arcmsr_enable_allintr(acb, intmask_org);
4935 }
4936
4937
4938 static void
4939 arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4940 {
4941 struct HBB_msgUnit *phbbmu;
4942 struct CCB *ccb;
4943 uint32_t flag_ccb, intmask_org;
4944 boolean_t error;
4945 uint32_t poll_count = 0;
4946 int index;
4947 boolean_t poll_ccb_done = B_FALSE;
4948
4949
4950 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4951
4952
4953 polling_ccb_retry:
4954 /* Use correct offset and size for syncing */
4955 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4956 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4957 return;
4958
4959 intmask_org = arcmsr_disable_allintr(acb);
4960
4961 for (;;) {
4962 index = phbbmu->doneq_index;
4963 if ((flag_ccb = phbbmu->done_qbuffer[index]) == 0) {
4964 if (poll_ccb_done) {
4965 				/* no more completed ccbs in the chip FIFO */
4966 break;
4967 } else {
4968 drv_usecwait(25000);
4969 if ((poll_count > 100) && (poll_ccb != NULL))
4970 break;
4971 if (acb->ccboutstandingcount == 0)
4972 break;
4973 poll_count++;
4974 /* clear doorbell interrupt */
4975 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4976 &phbbmu->hbb_doorbell->iop2drv_doorbell,
4977 ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
4978 }
4979 }
4980
4981 phbbmu->done_qbuffer[index] = 0;
4982 index++;
4983 		/* wrap the index at the end of the ring */
4984 index %= ARCMSR_MAX_HBB_POSTQUEUE;
4985 phbbmu->doneq_index = index;
4986
4987 /* frame must be 32 bytes aligned */
4988 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4989
4990 /* check if command done with no error */
4991 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4992 B_TRUE : B_FALSE;
4993
4994 if (poll_ccb != NULL)
4995 poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4996 if (ccb->acb != acb) {
4997 			arcmsr_warn(acb, "ccb came back with the wrong acb!");
4998 continue;
4999 }
5000 if (ccb->ccb_state != ARCMSR_CCB_START) {
5001 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
5002 ccb->ccb_state |= ARCMSR_CCB_BACK;
5003 ccb->pkt->pkt_reason = CMD_ABORTED;
5004 ccb->pkt->pkt_statistics |= STAT_ABORTED;
5005 arcmsr_ccb_complete(ccb, 1);
5006 continue;
5007 }
5008 arcmsr_report_ccb_state(acb, ccb, error);
5009 arcmsr_warn(acb,
5010 			    "polling op got unexpected ccb command done");
5011 continue;
5012 }
5013 arcmsr_report_ccb_state(acb, ccb, error);
5014 } /* drain reply FIFO */
5015 arcmsr_enable_allintr(acb, intmask_org);
5016 }
5017
5018
5019 static void
5020 arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
5021 {
5022
5023 struct HBC_msgUnit *phbcmu;
5024 struct CCB *ccb;
5025 boolean_t error;
5026 uint32_t ccb_cdb_phy;
5027 uint32_t flag_ccb, intmask_org;
5028 boolean_t poll_ccb_done = B_FALSE;
5029 uint32_t poll_count = 0;
5030
5031
5032 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5033
5034 polling_ccb_retry:
5035
5036 /* Use correct offset and size for syncing */
5037 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5038 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
5039 return;
5040
5041 intmask_org = arcmsr_disable_allintr(acb);
5042
5043 for (;;) {
5044 if (!(CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5045 &phbcmu->host_int_status) &
5046 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
5047
5048 if (poll_ccb_done) {
5049 				/* no more completed ccbs in the chip FIFO */
5050 break;
5051 } else {
5052 drv_usecwait(25000);
5053 if ((poll_count > 100) && (poll_ccb != NULL)) {
5054 break;
5055 }
5056 if (acb->ccboutstandingcount == 0) {
5057 break;
5058 }
5059 poll_count++;
5060 }
5061 }
5062 flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5063 &phbcmu->outbound_queueport_low);
5064 /* frame must be 32 bytes aligned */
5065 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
5066 /* the CDB is the first field of the CCB */
5067 ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
5068
5069 /* check if command done with no error */
5070 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
5071 B_TRUE : B_FALSE;
5072 if (poll_ccb != NULL)
5073 poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
5074
5075 if (ccb->acb != acb) {
5076 			arcmsr_warn(acb, "ccb came back with the wrong acb!");
5077 continue;
5078 }
5079 if (ccb->ccb_state != ARCMSR_CCB_START) {
5080 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
5081 ccb->ccb_state |= ARCMSR_CCB_BACK;
5082 ccb->pkt->pkt_reason = CMD_ABORTED;
5083 ccb->pkt->pkt_statistics |= STAT_ABORTED;
5084 arcmsr_ccb_complete(ccb, 1);
5085 continue;
5086 }
5087 arcmsr_report_ccb_state(acb, ccb, error);
5088 arcmsr_warn(acb,
5089 "polling op got unexpected ccb command done");
5090 continue;
5091 }
5092 arcmsr_report_ccb_state(acb, ccb, error);
5093 } /* drain reply FIFO */
5094 arcmsr_enable_allintr(acb, intmask_org);
5095 }
5096
5097
5098 /*
5099  * Function: arcmsr_hba_hardware_reset()
5100  * Works around an Intel IOP bug that can hang the firmware
5101  * and panic the kernel.
5102  */
5103 static void
5104 arcmsr_hba_hardware_reset(struct ACB *acb)
5105 {
5106 struct HBA_msgUnit *phbamu;
5107 uint8_t value[64];
5108 int i;
5109
5110 phbamu = (struct HBA_msgUnit *)acb->pmu;
5111 /* backup pci config data */
5112 for (i = 0; i < 64; i++) {
5113 value[i] = pci_config_get8(acb->pci_acc_handle, i);
5114 }
5115 /* hardware reset signal */
5116 if ((PCI_DEVICE_ID_ARECA_1680 ==
5117 pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID))) {
5118 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5119 &phbamu->reserved1[0], 0x00000003);
5120 } else {
5121 pci_config_put8(acb->pci_acc_handle, 0x84, 0x20);
5122 }
5123 drv_usecwait(1000000);
5124 /* write back pci config data */
5125 for (i = 0; i < 64; i++) {
5126 pci_config_put8(acb->pci_acc_handle, i, value[i]);
5127 }
5128 drv_usecwait(1000000);
5129 }
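/*
 * Editor's note -- the reset above preserves the first 64 bytes of PCI
 * config space (the standard header: IDs, command/status, BARs, latency)
 * because the chip-level reset can clobber them.  The ARC-1680 is reset
 * through a message-unit register write; other models poke what appears
 * to be a vendor-specific config register at offset 0x84.  The bracket:
 */
#if 0	/* sketch only */
	uint8_t saved[64];
	int i;

	for (i = 0; i < 64; i++)
		saved[i] = pci_config_get8(acb->pci_acc_handle, i);
	/* ... assert reset, drv_usecwait(1000000) ... */
	for (i = 0; i < 64; i++)
		pci_config_put8(acb->pci_acc_handle, i, saved[i]);
#endif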
5130
5131 /*
5132 * Function: arcmsr_abort_host_command
5133 */
5134 static uint8_t
5135 arcmsr_abort_host_command(struct ACB *acb)
5136 {
5137 uint8_t rtnval = 0;
5138
5139 switch (acb->adapter_type) {
5140 case ACB_ADAPTER_TYPE_A:
5141 rtnval = arcmsr_abort_hba_allcmd(acb);
5142 break;
5143 case ACB_ADAPTER_TYPE_B:
5144 rtnval = arcmsr_abort_hbb_allcmd(acb);
5145 break;
5146 case ACB_ADAPTER_TYPE_C:
5147 rtnval = arcmsr_abort_hbc_allcmd(acb);
5148 break;
5149 }
5150 return (rtnval);
5151 }
5152
5153 /*
5154 * Function: arcmsr_handle_iop_bus_hold
5155 */
5156 static void
5157 arcmsr_handle_iop_bus_hold(struct ACB *acb)
5158 {
5159
5160 switch (acb->adapter_type) {
5161 case ACB_ADAPTER_TYPE_A:
5162 {
5163 struct HBA_msgUnit *phbamu;
5164 int retry_count = 0;
5165
5166 acb->timeout_count = 0;
5167 phbamu = (struct HBA_msgUnit *)acb->pmu;
5168 arcmsr_hba_hardware_reset(acb);
5169 acb->acb_flags &= ~ACB_F_IOP_INITED;
5170 sleep_again:
5171 drv_usecwait(1000000);
5172 if ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5173 &phbamu->outbound_msgaddr1) &
5174 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
5175 if (retry_count > 60) {
5176 arcmsr_warn(acb,
5177 				    "timed out waiting for the hardware "
5178 				    "bus reset to return; RETRY TERMINATED!");
5179 return;
5180 }
5181 retry_count++;
5182 goto sleep_again;
5183 }
5184 arcmsr_iop_init(acb);
5185 break;
5186 }
5187
5188 }
5189 }
5190
5191 static void
5192 arcmsr_iop2drv_data_wrote_handle(struct ACB *acb) {
5193
5194 struct QBUFFER *prbuffer;
5195 uint8_t *pQbuffer;
5196 uint8_t *iop_data;
5197 int my_empty_len, iop_len;
5198 int rqbuf_firstidx, rqbuf_lastidx;
5199
5200 	/* check whether this iop data would overflow my rqbuffer */
5201 rqbuf_lastidx = acb->rqbuf_lastidx;
5202 rqbuf_firstidx = acb->rqbuf_firstidx;
5203 prbuffer = arcmsr_get_iop_rqbuffer(acb);
5204 iop_data = (uint8_t *)prbuffer->data;
5205 iop_len = prbuffer->data_len;
5206 my_empty_len = (rqbuf_firstidx-rqbuf_lastidx - 1) &
5207 (ARCMSR_MAX_QBUFFER - 1);
5208
5209 if (my_empty_len >= iop_len) {
5210 while (iop_len > 0) {
5211 pQbuffer = &acb->rqbuffer[rqbuf_lastidx];
5212 (void) memcpy(pQbuffer, iop_data, 1);
5213 rqbuf_lastidx++;
5214 			/* wrap the index at the end of the ring */
5215 rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
5216 iop_data++;
5217 iop_len--;
5218 }
5219 acb->rqbuf_lastidx = rqbuf_lastidx;
5220 arcmsr_iop_message_read(acb);
5221 /* signature, let IOP know data has been read */
5222 } else {
5223 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
5224 }
5225 }
5226
5227
5228
5229 static void
5230 arcmsr_iop2drv_data_read_handle(struct ACB *acb) {
5231
5232 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
5233 /*
5234 	 * check if there are any mail packages from the user space program
5235 	 * in my post bag; now is the time to send them to Areca's firmware
5236 */
5237
5238 if (acb->wqbuf_firstidx != acb->wqbuf_lastidx) {
5239
5240 uint8_t *pQbuffer;
5241 struct QBUFFER *pwbuffer;
5242 uint8_t *iop_data;
5243 int allxfer_len = 0;
5244
5245 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
5246 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
5247 iop_data = (uint8_t *)pwbuffer->data;
5248
5249 while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
5250 (allxfer_len < 124)) {
5251 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
5252 (void) memcpy(iop_data, pQbuffer, 1);
5253 acb->wqbuf_firstidx++;
5254 			/* wrap the index at the end of the ring */
5255 acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
5256 iop_data++;
5257 allxfer_len++;
5258 }
5259 pwbuffer->data_len = allxfer_len;
5260 /*
5261 		 * ring the inbound doorbell to tell the iop the driver data was
5262 		 * written ok; await the reply hwinterrupt for the next Qbuffer post
5263 */
5264 arcmsr_iop_message_wrote(acb);
5265 }
5266
5267 if (acb->wqbuf_firstidx == acb->wqbuf_lastidx)
5268 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
5269 }
5270
5271
5272 static void
5273 arcmsr_hba_doorbell_isr(struct ACB *acb)
5274 {
5275 uint32_t outbound_doorbell;
5276 struct HBA_msgUnit *phbamu;
5277
5278 phbamu = (struct HBA_msgUnit *)acb->pmu;
5279
5280 /*
5281 	 * Maybe we need to check here whether wrqbuffer_lock is held.
5282 	 * DOORBELL: ding! dong!
5283 	 * check if there is any mail to pick up from the firmware
5284 */
5285
5286 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5287 &phbamu->outbound_doorbell);
5288 /* clear doorbell interrupt */
5289 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5290 &phbamu->outbound_doorbell, outbound_doorbell);
5291
5292 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
5293 arcmsr_iop2drv_data_wrote_handle(acb);
5294
5295
5296 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
5297 arcmsr_iop2drv_data_read_handle(acb);
5298 }
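/*
 * Editor's note -- the message-buffer path is a two-doorbell handshake:
 * the IOP rings DATA_WRITE_OK when its rqbuffer holds fresh data for the
 * driver to copy out, and DATA_READ_OK when it has consumed the driver's
 * last wqbuffer post; the driver answers with the mirror-image inbound
 * doorbell bits via arcmsr_iop_message_read()/arcmsr_iop_message_wrote().
 */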
5299
5300
5301
5302 static void
5303 arcmsr_hbc_doorbell_isr(struct ACB *acb)
5304 {
5305 uint32_t outbound_doorbell;
5306 struct HBC_msgUnit *phbcmu;
5307
5308 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5309
5310 /*
5311 	 * Maybe we need to check here whether wrqbuffer_lock is held.
5312 	 * DOORBELL: ding! dong!
5313 	 * check if there is any mail to pick up from the firmware
5314 */
5315
5316 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5317 &phbcmu->outbound_doorbell);
5318 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5319 &phbcmu->outbound_doorbell_clear,
5320 outbound_doorbell); /* clear interrupt */
5321 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
5322 arcmsr_iop2drv_data_wrote_handle(acb);
5323 }
5324 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
5325 arcmsr_iop2drv_data_read_handle(acb);
5326 }
5327 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
5328 /* messenger of "driver to iop commands" */
5329 arcmsr_hbc_message_isr(acb);
5330 }
5331 }
5332
5333
5334 static void
5335 arcmsr_hba_message_isr(struct ACB *acb)
5336 {
5337 struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
5338 uint32_t *signature = (&phbamu->msgcode_rwbuffer[0]);
5339 uint32_t outbound_message;
5340
5341 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5342 &phbamu->outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
5343
5344 outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5345 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5346 if ((ddi_taskq_dispatch(acb->taskq,
5347 (void (*)(void *))arcmsr_dr_handle,
5348 acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5349 arcmsr_warn(acb, "DR task start failed");
5350 }
5351 }
5352
5353 static void
5354 arcmsr_hbb_message_isr(struct ACB *acb)
5355 {
5356 struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
5357 uint32_t *signature = (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0]);
5358 uint32_t outbound_message;
5359
5360 /* clear interrupts */
5361 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5362 &phbbmu->hbb_doorbell->iop2drv_doorbell,
5363 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
5364 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5365 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5366 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5367
5368 outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5369 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5370 if ((ddi_taskq_dispatch(acb->taskq,
5371 (void (*)(void *))arcmsr_dr_handle,
5372 acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5373 arcmsr_warn(acb, "DR task start failed");
5374 }
5375 }
5376
5377 static void
5378 arcmsr_hbc_message_isr(struct ACB *acb)
5379 {
5380 struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
5381 uint32_t *signature = (&phbcmu->msgcode_rwbuffer[0]);
5382 uint32_t outbound_message;
5383
5384 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5385 &phbcmu->outbound_doorbell_clear,
5386 ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
5387
5388 outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5389 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5390 if ((ddi_taskq_dispatch(acb->taskq,
5391 (void (*)(void *))arcmsr_dr_handle,
5392 acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5393 arcmsr_warn(acb, "DR task start failed");
5394 }
5395 }
5396
5397
5398 static void
5399 arcmsr_hba_postqueue_isr(struct ACB *acb)
5400 {
5401
5402 struct HBA_msgUnit *phbamu;
5403 struct CCB *ccb;
5404 uint32_t flag_ccb;
5405 boolean_t error;
5406
5407 phbamu = (struct HBA_msgUnit *)acb->pmu;
5408
5409 /* areca cdb command done */
5410 /* Use correct offset and size for syncing */
5411 (void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5412 DDI_DMA_SYNC_FORKERNEL);
5413
5414 while ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5415 &phbamu->outbound_queueport)) != 0xFFFFFFFF) {
5416 /* frame must be 32 bytes aligned */
5417 ccb = NumToPtr((acb->vir2phy_offset+(flag_ccb << 5)));
5418 /* check if command done with no error */
5419 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
5420 B_TRUE : B_FALSE;
5421 arcmsr_drain_donequeue(acb, ccb, error);
5422 } /* drain reply FIFO */
5423 }
5424
5425
5426 static void
5427 arcmsr_hbb_postqueue_isr(struct ACB *acb)
5428 {
5429 struct HBB_msgUnit *phbbmu;
5430 struct CCB *ccb;
5431 uint32_t flag_ccb;
5432 boolean_t error;
5433 int index;
5434
5435 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5436
5437 /* areca cdb command done */
5438 index = phbbmu->doneq_index;
5439 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5440 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
5441 return;
5442 while ((flag_ccb = phbbmu->done_qbuffer[index]) != 0) {
5443 phbbmu->done_qbuffer[index] = 0;
5444 /* frame must be 32 bytes aligned */
5445
5446 /* the CDB is the first field of the CCB */
5447 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
5448
5449 /* check if command done with no error */
5450 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
5451 B_TRUE : B_FALSE;
5452 arcmsr_drain_donequeue(acb, ccb, error);
5453 index++;
5454 		/* wrap the index at the end of the ring */
5455 index %= ARCMSR_MAX_HBB_POSTQUEUE;
5456 phbbmu->doneq_index = index;
5457 } /* drain reply FIFO */
5458 }
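
/*
 * Illustrative sketch (example only, hypothetical helper name): the
 * done-queue index above walks a fixed-size ring.  "index %= N" after
 * the increment is equivalent to resetting to slot 0 once the last slot
 * is consumed; with N == 4 the sequence is 0, 1, 2, 3, 0, 1, ...
 */
#if 0	/* example only */
static int
example_ring_advance(int index)
{
	index++;
	index %= ARCMSR_MAX_HBB_POSTQUEUE;	/* wrap to slot 0 */
	return (index);
}
#endif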


static void
arcmsr_hbc_postqueue_isr(struct ACB *acb)
{
	struct HBC_msgUnit *phbcmu;
	struct CCB *ccb;
	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
	boolean_t error;

	phbcmu = (struct HBC_msgUnit *)acb->pmu;
	/* areca cdb command done */
	/* sync the whole CCB pool (offset/length of 0 syncs the object) */
	(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);

	while (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
	    &phbcmu->host_int_status) &
	    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
		flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbcmu->outbound_queueport_low);
		/* frames are 32 bytes aligned */
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);

		/* the CDB is the first field of the CCB */
		ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));

		/* B_TRUE if the adapter flagged an error */
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
		    B_TRUE : B_FALSE;
		arcmsr_drain_donequeue(acb, ccb, error);
		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
			/* ask the IOP to pace its postings and bound
			 * the work done in this ISR pass */
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbcmu->inbound_doorbell,
			    ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
			break;
		}
		throttling++;
	}	/* drain reply FIFO */
}
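
/*
 * Illustrative sketch (example only, all names hypothetical): the
 * throttling counter above bounds how many replies one interrupt pass
 * may drain before telling the producer to slow down.  The same pattern
 * in isolation:
 */
#if 0	/* example only */
/* hypothetical helpers standing in for the hardware queue accessors */
extern boolean_t example_reply_pending(void);
extern void example_consume_one_reply(void);

#define	EXAMPLE_BUDGET	16	/* hypothetical per-pass limit */

static void
example_bounded_drain(void)
{
	int drained = 0;

	while (example_reply_pending()) {
		example_consume_one_reply();
		if (++drained == EXAMPLE_BUDGET) {
			/* signal the producer to throttle; resume the
			 * drain on the next interrupt */
			break;
		}
	}
}
#endif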


static uint_t
arcmsr_handle_hba_isr(struct ACB *acb)
{
	uint32_t outbound_intstatus;
	struct HBA_msgUnit *phbamu;

	phbamu = (struct HBA_msgUnit *)acb->pmu;

	outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
	    &phbamu->outbound_intstatus) & acb->outbound_int_enable;

	if (outbound_intstatus == 0)	/* it must be a shared irq */
		return (DDI_INTR_UNCLAIMED);

	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
	    outbound_intstatus);	/* clear interrupt */

	/* MU doorbell interrupts */
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
		arcmsr_hba_doorbell_isr(acb);

	/* MU post queue interrupts */
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
		arcmsr_hba_postqueue_isr(acb);

	/* MU message interrupt */
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
		arcmsr_hba_message_isr(acb);

	return (DDI_INTR_CLAIMED);
}


static uint_t
arcmsr_handle_hbb_isr(struct ACB *acb)
{
	uint32_t outbound_doorbell;
	struct HBB_msgUnit *phbbmu;

	phbbmu = (struct HBB_msgUnit *)acb->pmu;

	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
	    &phbbmu->hbb_doorbell->iop2drv_doorbell) & acb->outbound_int_enable;

	if (outbound_doorbell == 0)	/* it must be a shared irq */
		return (DDI_INTR_UNCLAIMED);

	/* clear doorbell interrupt */
	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
	    &phbbmu->hbb_doorbell->iop2drv_doorbell, ~outbound_doorbell);
	/* wait a cycle */
	(void) CHIP_REG_READ32(acb->reg_mu_acc_handle0,
	    &phbbmu->hbb_doorbell->iop2drv_doorbell);
	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
	    ARCMSR_DRV2IOP_END_OF_INTERRUPT);

	/* MU ioctl transfer doorbell interrupts */
	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
		arcmsr_iop2drv_data_wrote_handle(acb);

	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
		arcmsr_iop2drv_data_read_handle(acb);

	/* MU post queue interrupts */
	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
		arcmsr_hbb_postqueue_isr(acb);

	/* MU message interrupt */
	if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
		arcmsr_hbb_message_isr(acb);

	return (DDI_INTR_CLAIMED);
}

static uint_t
arcmsr_handle_hbc_isr(struct ACB *acb)
{
	uint32_t host_interrupt_status;
	struct HBC_msgUnit *phbcmu;

	phbcmu = (struct HBC_msgUnit *)acb->pmu;
	/* check outbound intstatus */
	host_interrupt_status =
	    CHIP_REG_READ32(acb->reg_mu_acc_handle0, &phbcmu->host_int_status);
	if (host_interrupt_status == 0)	/* it must be a shared irq */
		return (DDI_INTR_UNCLAIMED);
	/* MU ioctl transfer doorbell interrupts */
	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
		/* messenger of "ioctl message read/write" */
		arcmsr_hbc_doorbell_isr(acb);
	}
	/* MU post queue interrupts */
	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
		/* messenger of "scsi commands" */
		arcmsr_hbc_postqueue_isr(acb);
	}
	return (DDI_INTR_CLAIMED);
}

static uint_t
arcmsr_intr_handler(caddr_t arg, caddr_t arg2)
{
	struct ACB *acb = (void *)arg;
	struct CCB *ccb;
	uint_t retrn = DDI_INTR_UNCLAIMED;
	_NOTE(ARGUNUSED(arg2))

	mutex_enter(&acb->isr_mutex);
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		retrn = arcmsr_handle_hba_isr(acb);
		break;

	case ACB_ADAPTER_TYPE_B:
		retrn = arcmsr_handle_hbb_isr(acb);
		break;

	case ACB_ADAPTER_TYPE_C:
		retrn = arcmsr_handle_hbc_isr(acb);
		break;

	default:
		/* We should never be here */
		ASSERT(0);
		break;
	}
	mutex_exit(&acb->isr_mutex);
	while ((ccb = arcmsr_get_complete_ccb_from_list(acb)) != NULL) {
		arcmsr_ccb_complete(ccb, 1);
	}
	return (retrn);
}
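
/*
 * Illustrative sketch (abbreviated example only, not the driver's actual
 * interrupt setup): arcmsr_intr_handler() has the ddi_intr_handler_t
 * signature, so registering it for one vector follows the usual DDI
 * pattern.  "example_add_intr" is a hypothetical helper name.
 */
#if 0	/* example only */
static int
example_add_intr(struct ACB *acb, ddi_intr_handle_t h)
{
	/* pass the ACB as arg1; arg2 is unused by the handler */
	if (ddi_intr_add_handler(h, arcmsr_intr_handler,
	    (caddr_t)acb, NULL) != DDI_SUCCESS)
		return (DDI_FAILURE);
	/* DDI_INTR_UNCLAIMED returns let shared vectors chain on */
	return (ddi_intr_enable(h));
}
#endif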


static void
arcmsr_wait_firmware_ready(struct ACB *acb)
{
	uint32_t firmware_state;

	firmware_state = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
	{
		struct HBA_msgUnit *phbamu;

		phbamu = (struct HBA_msgUnit *)acb->pmu;
		do {
			firmware_state =
			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
			    &phbamu->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)
		    == 0);
		break;
	}

	case ACB_ADAPTER_TYPE_B:
	{
		struct HBB_msgUnit *phbbmu;

		phbbmu = (struct HBB_msgUnit *)acb->pmu;
		do {
			firmware_state =
			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
			    &phbbmu->hbb_doorbell->iop2drv_doorbell);
		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
		    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
		break;
	}

	case ACB_ADAPTER_TYPE_C:
	{
		struct HBC_msgUnit *phbcmu;

		phbcmu = (struct HBC_msgUnit *)acb->pmu;
		do {
			firmware_state =
			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
			    &phbcmu->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK)
		    == 0);
		break;
	}
	}
}
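
/*
 * Illustrative sketch (example only, an alternative rather than the
 * driver's behavior): the polls above spin until the firmware-ready bit
 * appears and never time out.  A bounded variant, with hypothetical
 * retry and delay values, could use drv_usecwait(), which busy-waits
 * and is safe in this context.
 */
#if 0	/* example only */
static boolean_t
example_wait_fw_ready(struct ACB *acb, uint32_t *reg, uint32_t ready_bit)
{
	int retries;

	for (retries = 0; retries < 2000; retries++) {	/* ~2s total */
		if (CHIP_REG_READ32(acb->reg_mu_acc_handle0, reg) &
		    ready_bit)
			return (B_TRUE);
		drv_usecwait(1000);	/* 1 ms between polls */
	}
	return (B_FALSE);
}
#endif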

static void
arcmsr_clear_doorbell_queue_buffer(struct ACB *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct HBA_msgUnit *phbamu;
		uint32_t outbound_doorbell;

		phbamu = (struct HBA_msgUnit *)acb->pmu;
		/* empty doorbell Qbuffer if the doorbell rang */
		outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_doorbell);
		/* clear doorbell interrupt */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_doorbell, outbound_doorbell);
		/* let the IOP know the data has been read */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbamu->inbound_doorbell,
		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
		break;
	}

	case ACB_ADAPTER_TYPE_B: {
		struct HBB_msgUnit *phbbmu;

		phbbmu = (struct HBB_msgUnit *)acb->pmu;
		/* clear interrupt and message state */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
		    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
		/* let the IOP know the data has been read */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
		    ARCMSR_DRV2IOP_DATA_READ_OK);
		break;
	}

	case ACB_ADAPTER_TYPE_C: {
		struct HBC_msgUnit *phbcmu;
		uint32_t outbound_doorbell;

		phbcmu = (struct HBC_msgUnit *)acb->pmu;
		/* empty doorbell Qbuffer if the doorbell rang */
		outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbcmu->outbound_doorbell);
		/* clear the outbound doorbell interrupt */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbcmu->outbound_doorbell_clear, outbound_doorbell);
		/* let the IOP know the data has been read */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbcmu->inbound_doorbell,
		    ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
		break;
	}
	}
}
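
/*
 * Illustrative sketch (example only, hypothetical helper name): the
 * three-step doorbell handshake used above, in isolation.  The driver
 * (1) reads the pending doorbell bits, (2) writes them back (or a clear
 * pattern) to acknowledge the interrupt, and (3) rings the inbound
 * doorbell so the IOP knows it may post new data.  Type A registers are
 * used for concreteness.
 */
#if 0	/* example only */
static void
example_doorbell_ack(struct ACB *acb, struct HBA_msgUnit *phbamu)
{
	uint32_t pending;

	pending = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
	    &phbamu->outbound_doorbell);		/* (1) what rang? */
	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
	    &phbamu->outbound_doorbell, pending);	/* (2) acknowledge */
	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
	    &phbamu->inbound_doorbell,
	    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);	/* (3) unblock IOP */
}
#endif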


static uint32_t
arcmsr_iop_confirm(struct ACB *acb)
{
	uint64_t cdb_phyaddr;
	uint32_t cdb_phyaddr_hi32;

	/*
	 * Tell the IOP the high 32 bits of the CCB pool's DMA address,
	 * if they are non-zero.
	 */
	cdb_phyaddr = acb->ccb_cookie.dmac_laddress;
	cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		if (cdb_phyaddr_hi32 != 0) {
			struct HBA_msgUnit *phbamu;

			phbamu = (struct HBA_msgUnit *)acb->pmu;
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbamu->msgcode_rwbuffer[0],
			    ARCMSR_SIGNATURE_SET_CONFIG);
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbamu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbamu->inbound_msgaddr0,
			    ARCMSR_INBOUND_MESG0_SET_CONFIG);
			if (!arcmsr_hba_wait_msgint_ready(acb)) {
				arcmsr_warn(acb,
				    "timeout setting ccb "
				    "high physical address");
				return (FALSE);
			}
		}
		break;

	/* if the adapter is type B, set the window of the post command queue */
	case ACB_ADAPTER_TYPE_B: {
		uint32_t post_queue_phyaddr;
		struct HBB_msgUnit *phbbmu;

		phbbmu = (struct HBB_msgUnit *)acb->pmu;
		phbbmu->postq_index = 0;
		phbbmu->doneq_index = 0;
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
		    ARCMSR_MESSAGE_SET_POST_WINDOW);

		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
			arcmsr_warn(acb, "timeout setting post command "
			    "queue window");
			return (FALSE);
		}

		post_queue_phyaddr = (uint32_t)cdb_phyaddr +
		    ARCMSR_MAX_FREECCB_NUM * P2ROUNDUP(sizeof (struct CCB), 32)
		    + offsetof(struct HBB_msgUnit, post_qbuffer);
		/* driver "set config" signature */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0],
		    ARCMSR_SIGNATURE_SET_CONFIG);
		/* high 32 bits of the pool address; normally zero */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1],
		    cdb_phyaddr_hi32);
		/* postQ base address */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2],
		    post_queue_phyaddr);
		/* doneQ base address, one queue past the postQ */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3],
		    post_queue_phyaddr + 1056);
		/* each queue is (256+8)*4 = 1056 bytes long */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4], 1056);
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
		    ARCMSR_MESSAGE_SET_CONFIG);

		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
			arcmsr_warn(acb,
			    "timeout setting command queue window");
			return (FALSE);
		}
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
		    ARCMSR_MESSAGE_START_DRIVER_MODE);

		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
			arcmsr_warn(acb, "timeout in 'start driver mode'");
			return (FALSE);
		}
		break;
	}

	case ACB_ADAPTER_TYPE_C:
		if (cdb_phyaddr_hi32 != 0) {
			struct HBC_msgUnit *phbcmu;

			phbcmu = (struct HBC_msgUnit *)acb->pmu;
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbcmu->msgcode_rwbuffer[0],
			    ARCMSR_SIGNATURE_SET_CONFIG);
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbcmu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbcmu->inbound_msgaddr0,
			    ARCMSR_INBOUND_MESG0_SET_CONFIG);
			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
			    &phbcmu->inbound_doorbell,
			    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
			if (!arcmsr_hbc_wait_msgint_ready(acb)) {
				arcmsr_warn(acb, "'set ccb "
				    "high part physical address' timeout");
				return (FALSE);
			}
		}
		break;
	}
	return (TRUE);
}
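
/*
 * Illustrative sketch (example only, hypothetical value): splitting the
 * 64-bit DMA cookie address used above.  The double 16-bit shift is a
 * portable way to extract the high word without ever shifting by 32:
 * with a hypothetical address of 0x0000000123456780,
 * (addr >> 16) >> 16 yields 0x00000001 and the truncating cast yields
 * 0x23456780.
 */
#if 0	/* example only */
static void
example_split_dma_address(uint64_t cdb_phyaddr)
{
	uint32_t hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
	uint32_t lo32 = (uint32_t)cdb_phyaddr;	/* low 32 bits */

	/* hi32 is what arcmsr_iop_confirm() hands to the firmware */
}
#endif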


/*
 * ONLY used for Adapter type B
 */
static void
arcmsr_enable_eoi_mode(struct ACB *acb)
{
	struct HBB_msgUnit *phbbmu;

	phbbmu = (struct HBB_msgUnit *)acb->pmu;

	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
	    ARCMSR_MESSAGE_ACTIVE_EOI_MODE);

	if (!arcmsr_hbb_wait_msgint_ready(acb))
		arcmsr_warn(acb, "'iop enable eoi mode' timeout");
}

/* start background rebuild */
static void
arcmsr_iop_init(struct ACB *acb)
{
	uint32_t intmask_org;

	/* disable all outbound interrupts */
	intmask_org = arcmsr_disable_allintr(acb);
	arcmsr_wait_firmware_ready(acb);
	(void) arcmsr_iop_confirm(acb);

	/* start background rebuild */
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_get_hba_config(acb);
		arcmsr_start_hba_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_get_hbb_config(acb);
		arcmsr_start_hbb_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_get_hbc_config(acb);
		arcmsr_start_hbc_bgrb(acb);
		break;
	}
	/* empty doorbell Qbuffer if the doorbell rang */
	arcmsr_clear_doorbell_queue_buffer(acb);

	if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
		arcmsr_enable_eoi_mode(acb);

	/* enable the outbound post queue and doorbell interrupts */
	arcmsr_enable_allintr(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}
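
/*
 * Illustrative sketch (example only, hypothetical helper name):
 * arcmsr_iop_init() brackets the whole bring-up with a save/restore of
 * the interrupt mask so nothing fires while the IOP is half-configured.
 * The pattern in isolation:
 */
#if 0	/* example only */
static void
example_masked_section(struct ACB *acb)
{
	uint32_t saved_mask;

	saved_mask = arcmsr_disable_allintr(acb);	/* returns old mask */
	/* ... reconfigure the hardware with interrupts quiesced ... */
	arcmsr_enable_allintr(acb, saved_mask);		/* restore */
}
#endif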