// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver,
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <linux/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "myrb.h"

static struct raid_template *myrb_raid_template;

static void myrb_monitor(struct work_struct *work);
static inline void myrb_translate_devstate(void *DeviceState);

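/* Virtual channel on which the controller's logical drives are presented */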
static inline int myrb_logical_channel(struct Scsi_Host *shost)
{
	return shost->max_channel - 1;
}

static struct myrb_devstate_name_entry {
	enum myrb_devstate state;
	const char *name;
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};

static const char *myrb_devstate_name(enum myrb_devstate state)
{
	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return "Unknown";
}

static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;
	const char *name;
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};

static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
{
	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

/*
 * myrb_create_mempools - allocates auxiliary data structures
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
{
	size_t elem_size, elem_align;

	elem_align = sizeof(struct myrb_sge);
	elem_size = cb->host->sg_tablesize * elem_align;
	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
				      elem_size, elem_align, 0);
	if (cb->sg_pool == NULL) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate SG pool\n");
		return false;
	}

	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
				       sizeof(struct myrb_dcdb),
				       sizeof(unsigned int), 0);
	if (!cb->dcdb_pool) {
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate DCDB pool\n");
		return false;
	}

	cb->work_q = alloc_ordered_workqueue("myrb_wq_%d", WQ_MEM_RECLAIM,
					     cb->host->host_no);
	if (!cb->work_q) {
		dma_pool_destroy(cb->dcdb_pool);
		cb->dcdb_pool = NULL;
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to create workqueue\n");
		return false;
	}

	/*
	 * Initialize the Monitoring Timer.
	 */
	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);

	return true;
}

/*
 * myrb_destroy_mempools - tears down the memory pools for the controller
 */
static void myrb_destroy_mempools(struct myrb_hba *cb)
{
	cancel_delayed_work_sync(&cb->monitor_work);
	destroy_workqueue(cb->work_q);

	dma_pool_destroy(cb->sg_pool);
	dma_pool_destroy(cb->dcdb_pool);
}

/*
 * myrb_reset_cmd - reset command block
 */
static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
	cmd_blk->status = 0;
}

/*
 * myrb_qcmd - queues command block for execution
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	cb->write_cmd_mbox(next_mbox, mbox);
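	/*
	 * If either of the two previously posted mailboxes has already
	 * been consumed (first word cleared), the controller may have
	 * gone idle; kick it so that it notices the new command.
	 */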
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}

/*
 * myrb_exec_cmd - executes command block and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
		struct myrb_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	unsigned long flags;

	cmd_blk->completion = &cmpl;

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	wait_for_completion(&cmpl);
	return cmd_blk->status;
}

/*
 * myrb_exec_type3 - executes a type 3 command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, dma_addr_t addr)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_DCMD_TAG;
	mbox->type3.opcode = op;
	mbox->type3.addr = addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	return status;
}

/*
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}

static char *myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};

/**
 * myrb_get_event - get event log from HBA
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Executes a type 3E command and logs the event message
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);
	else if (ev_buf->seq_num == event) {
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}

/*
 * myrb_get_errtable - retrieves the error table from the controller
 *
 * Executes a type 3 command and logs the error table from the controller.
 */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
				+ sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}

/*
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 *
 * Executes a type 3 command and updates the logical device table.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

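	/* Reconcile the SCSI device list with the refreshed logical drive table */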
	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		memcpy(old, new, sizeof(*new));
		scsi_device_put(sdev);
	}
	return status;
}

/*
 * myrb_get_rbld_progress - get rebuild progress information
 *
 * Executes a type 3 command and returns the rebuild progress
 * information.
 *
 * Return: command status
 */
static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
		struct myrb_rbld_progress *rbld)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf)
		return MYRB_STATUS_RBLD_NOT_CHECKED;

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (rbld)
		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
	return status;
}

/*
 * myrb_update_rbld_progress - updates the rebuild status
 *
 * Updates the rebuild status for the attached logical devices.
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		if (!sdev)
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
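			/* >> 7 on both terms keeps 100 * blocks_done within 32 bits */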
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	cb->last_rbld_status = status;
}

/*
 * myrb_get_cc_progress - retrieve the rebuild/consistency check status
 *
 * Executes a type 3 command and fetches the rebuild / consistency check
 * status.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}

/*
 * myrb_bgi_control - updates background initialisation status
 *
 * Executes a type 3B command and updates the background initialisation status
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization in Progress: %d%% completed\n",
				    (100 * (bgi->blocks_done >> 7))
				    / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		fallthrough;
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}

/*
 * myrb_hba_enquiry - updates the controller status
 *
 * Executes a DAC_V1_Enquiry command and updates the controller status.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
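	/* Report logical drives that have appeared or disappeared since the last poll */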
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}

/*
 * myrb_set_pdev_state - sets the device state for a physical device
 *
 * Return: command status
 */
static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
		struct scsi_device *sdev, enum myrb_devstate state)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.state = state & 0x1F;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);

	return status;
}

/*
 * myrb_enable_mmio - enables the Memory Mailbox Interface
 *
 * PD and P controller types have no memory mailbox, but still need the
 * other dma mapped memory.
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
{
	void __iomem *base = cb->io_base;
	struct pci_dev *pdev = cb->pdev;
	size_t err_table_size;
	size_t ldev_info_size;
	union myrb_cmd_mbox *cmd_mbox_mem;
	struct myrb_stat_mbox *stat_mbox_mem;
	union myrb_cmd_mbox mbox;
	unsigned short status;

	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "DMA mask out of range\n");
		return false;
	}

	cb->enquiry = dma_alloc_coherent(&pdev->dev,
					 sizeof(struct myrb_enquiry),
					 &cb->enquiry_addr, GFP_KERNEL);
	if (!cb->enquiry)
		return false;

	err_table_size = sizeof(struct myrb_error_entry) *
		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
					   &cb->err_table_addr, GFP_KERNEL);
	if (!cb->err_table)
		return false;

	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
					       &cb->ldev_info_addr, GFP_KERNEL);
	if (!cb->ldev_info_buf)
		return false;

	/*
	 * Skip mailbox initialisation for PD and P Controllers
	 */
	if (!mmio_init_fn)
		return true;

	/* These are the base addresses for the command memory mailbox array */
	cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
						cb->cmd_mbox_size,
						&cb->cmd_mbox_addr,
						GFP_KERNEL);
	if (!cb->first_cmd_mbox)
		return false;

	cmd_mbox_mem = cb->first_cmd_mbox;
	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
	cb->last_cmd_mbox = cmd_mbox_mem;
	cb->next_cmd_mbox = cb->first_cmd_mbox;
	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
	    sizeof(struct myrb_stat_mbox);
	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
						 cb->stat_mbox_size,
						 &cb->stat_mbox_addr,
						 GFP_KERNEL);
	if (!cb->first_stat_mbox)
		return false;

	stat_mbox_mem = cb->first_stat_mbox;
	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
	cb->last_stat_mbox = stat_mbox_mem;
	cb->next_stat_mbox = cb->first_stat_mbox;

	/* Enable the Memory Mailbox Interface. */
	cb->dual_mode_interface = true;
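	/* Request dual-mode operation (opcode2 0x14); 0x10 below is the single-mode fallback */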
	mbox.typeX.opcode = 0x2B;
	mbox.typeX.id = 0;
	mbox.typeX.opcode2 = 0x14;
	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;

	status = mmio_init_fn(pdev, base, &mbox);
	if (status != MYRB_STATUS_SUCCESS) {
		cb->dual_mode_interface = false;
		mbox.typeX.opcode2 = 0x10;
		status = mmio_init_fn(pdev, base, &mbox);
		if (status != MYRB_STATUS_SUCCESS) {
			dev_err(&pdev->dev,
				"Failed to enable mailbox, status %02X\n",
				status);
			return false;
		}
	}
	return true;
}

/*
 * myrb_get_hba_config - reads the configuration information
 *
 * Reads the configuration information from the controller and
 * initializes the controller structure.
 *
 * Return: 0 on success, errno otherwise
 */
static int myrb_get_hba_config(struct myrb_hba *cb)
{
	struct myrb_enquiry2 *enquiry2;
	dma_addr_t enquiry2_addr;
	struct myrb_config2 *config2;
	dma_addr_t config2_addr;
	struct Scsi_Host *shost = cb->host;
	struct pci_dev *pdev = cb->pdev;
	int pchan_max = 0, pchan_cur = 0;
	unsigned short status;
	int ret = -ENODEV, memsize = 0;

	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				      &enquiry2_addr, GFP_KERNEL);
	if (!enquiry2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 enquiry2 memory\n");
		return -ENOMEM;
	}
	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
				     &config2_addr, GFP_KERNEL);
	if (!config2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 config2 memory\n");
		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				  enquiry2, enquiry2_addr);
		return -ENOMEM;
	}
	mutex_lock(&cb->dma_mutex);
	status = myrb_hba_enquiry(cb);
	mutex_unlock(&cb->dma_mutex);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry2\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue ReadConfig2\n");
		goto out_free;
	}

	status = myrb_get_ldev_info(cb);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to get logical drive information\n");
		goto out_free;
	}

	/*
	 * Initialize the Controller Model Name and Full Model Name fields.
	 */
	switch (enquiry2->hw.sub_model) {
	case DAC960_V1_P_PD_PU:
		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
			strcpy(cb->model_name, "DAC960PU");
		else
			strcpy(cb->model_name, "DAC960PD");
		break;
	case DAC960_V1_PL:
		strcpy(cb->model_name, "DAC960PL");
		break;
	case DAC960_V1_PG:
		strcpy(cb->model_name, "DAC960PG");
		break;
	case DAC960_V1_PJ:
		strcpy(cb->model_name, "DAC960PJ");
		break;
	case DAC960_V1_PR:
		strcpy(cb->model_name, "DAC960PR");
		break;
	case DAC960_V1_PT:
		strcpy(cb->model_name, "DAC960PT");
		break;
	case DAC960_V1_PTL0:
		strcpy(cb->model_name, "DAC960PTL0");
		break;
	case DAC960_V1_PRL:
		strcpy(cb->model_name, "DAC960PRL");
		break;
	case DAC960_V1_PTL1:
		strcpy(cb->model_name, "DAC960PTL1");
		break;
	case DAC960_V1_1164P:
		strcpy(cb->model_name, "eXtremeRAID 1100");
		break;
	default:
		shost_printk(KERN_WARNING, cb->host,
			     "Unknown Model %X\n",
			     enquiry2->hw.sub_model);
		goto out;
	}
	/*
	 * Initialize the Controller Firmware Version field and verify that it
	 * is a supported firmware version.
	 * The supported firmware versions are:
	 *
	 * DAC1164P		    5.06 and above
	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
	 * DAC960PU/PD/PL	    3.51 and above
	 * DAC960PU/PD/PL/P	    2.73 and above
	 */
#if defined(CONFIG_ALPHA)
	/*
	 * DEC Alpha machines were often equipped with DAC960 cards that were
	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
	 * the last custom FW revision to be released by DEC for these older
	 * controllers, appears to work quite well with this driver.
	 *
	 * Cards tested successfully were several versions each of the PD and
	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
	 * back of the board, of:
	 *
	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
	 *         or D040349 (3-channel)
	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
	 *         or D040397 (3-channel)
	 */
# define FIRMWARE_27X	"2.70"
#else
# define FIRMWARE_27X	"2.73"
#endif

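	/* Enquiry2 may report no firmware version; fall back to the Enquiry data */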
	if (enquiry2->fw.major_version == 0) {
		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
		enquiry2->fw.firmware_type = '0';
		enquiry2->fw.turn_id = 0;
	}
	snprintf(cb->fw_version, sizeof(cb->fw_version),
		"%u.%02u-%c-%02u",
		enquiry2->fw.major_version,
		enquiry2->fw.minor_version,
		enquiry2->fw.firmware_type,
		enquiry2->fw.turn_id);
	if (!((enquiry2->fw.major_version == 5 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 4 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 3 &&
	       enquiry2->fw.minor_version >= 51) ||
	      (enquiry2->fw.major_version == 2 &&
	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
		shost_printk(KERN_WARNING, cb->host,
			"Firmware Version '%s' unsupported\n",
			cb->fw_version);
		goto out;
	}
	/*
	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
	 * Enclosure Management Enabled fields.
	 */
	switch (enquiry2->hw.model) {
	case MYRB_5_CHANNEL_BOARD:
		pchan_max = 5;
		break;
	case MYRB_3_CHANNEL_BOARD:
	case MYRB_3_CHANNEL_ASIC_DAC:
		pchan_max = 3;
		break;
	case MYRB_2_CHANNEL_BOARD:
		pchan_max = 2;
		break;
	default:
		pchan_max = enquiry2->cfg_chan;
		break;
	}
	pchan_cur = enquiry2->cur_chan;
	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
		cb->bus_width = 32;
	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
		cb->bus_width = 16;
	else
		cb->bus_width = 8;
	cb->ldev_block_size = enquiry2->ldev_block_size;
	shost->max_channel = pchan_cur;
	shost->max_id = enquiry2->max_targets;
	memsize = enquiry2->mem_size >> 20;
	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most one less than the
	 * Controller Queue Depth to allow for an automatic drive
	 * rebuild operation.
	 */
	shost->can_queue = cb->enquiry->max_tcq;
	if (shost->can_queue < 3)
		shost->can_queue = enquiry2->max_cmds;
	if (shost->can_queue < 3)
		/* Play safe and disable TCQ */
		shost->can_queue = 1;

	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
	shost->max_sectors = enquiry2->max_sectors;
	shost->sg_tablesize = enquiry2->max_sge;
	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
	/*
	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
	 */
	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	/* Assume 255/63 translation */
	cb->ldev_geom_heads = 255;
	cb->ldev_geom_sectors = 63;
	if (config2->drive_geometry) {
		cb->ldev_geom_heads = 128;
		cb->ldev_geom_sectors = 32;
	}

	/*
	 * Initialize the Background Initialization Status.
	 */
	if ((cb->fw_version[0] == '4' &&
	     strcmp(cb->fw_version, "4.08") >= 0) ||
	    (cb->fw_version[0] == '5' &&
	     strcmp(cb->fw_version, "5.08") >= 0)) {
		cb->bgi_status_supported = true;
		myrb_bgi_control(cb);
	}
	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
	ret = 0;

out:
	shost_printk(KERN_INFO, cb->host,
		"Configuring %s PCI RAID Controller\n", cb->model_name);
	shost_printk(KERN_INFO, cb->host,
		"  Firmware Version: %s, Memory Size: %dMB\n",
		cb->fw_version, memsize);
	if (cb->io_addr == 0)
		shost_printk(KERN_INFO, cb->host,
			"  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
			(unsigned long)cb->pci_addr, cb->irq);
	else
		shost_printk(KERN_INFO, cb->host,
			"  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
			(unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
			cb->irq);
	shost_printk(KERN_INFO, cb->host,
		"  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		cb->host->can_queue, cb->host->max_sectors);
	shost_printk(KERN_INFO, cb->host,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     cb->host->can_queue, cb->host->sg_tablesize,
		     MYRB_SCATTER_GATHER_LIMIT);
	shost_printk(KERN_INFO, cb->host,
		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
		     cb->stripe_size, cb->segment_size,
		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
		     cb->safte_enabled ?
		     "  SAF-TE Enclosure Management Enabled" : "");
	shost_printk(KERN_INFO, cb->host,
		     "  Physical: %d/%d channels %d/%d/%d devices\n",
		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
		     cb->host->max_id);

	shost_printk(KERN_INFO, cb->host,
		     "  Logical: 1/1 channels, %d/%d disks\n",
		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
			  enquiry2, enquiry2_addr);
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
			  config2, config2_addr);

	return ret;
}

/*
 * myrb_unmap - unmaps controller structures
 */
static void myrb_unmap(struct myrb_hba *cb)
{
	if (cb->ldev_info_buf) {
		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
			MYRB_MAX_LDEVS;
		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
				  cb->ldev_info_buf, cb->ldev_info_addr);
		cb->ldev_info_buf = NULL;
	}
	if (cb->err_table) {
		size_t err_table_size = sizeof(struct myrb_error_entry) *
			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
		dma_free_coherent(&cb->pdev->dev, err_table_size,
				  cb->err_table, cb->err_table_addr);
		cb->err_table = NULL;
	}
	if (cb->enquiry) {
		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
				  cb->enquiry, cb->enquiry_addr);
		cb->enquiry = NULL;
	}
	if (cb->first_stat_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
				  cb->first_stat_mbox, cb->stat_mbox_addr);
		cb->first_stat_mbox = NULL;
	}
	if (cb->first_cmd_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
		cb->first_cmd_mbox = NULL;
	}
}

/*
 * myrb_cleanup - cleanup controller structures
 */
static void myrb_cleanup(struct myrb_hba *cb)
{
	struct pci_dev *pdev = cb->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrb_unmap(cb);

	if (cb->mmio_base) {
		if (cb->disable_intr)
			cb->disable_intr(cb->io_base);
		iounmap(cb->mmio_base);
	}
	if (cb->irq)
		free_irq(cb->irq, cb);
	if (cb->io_addr)
		release_region(cb->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	scsi_host_put(cb->host);
}

static int myrb_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrb_hba *cb = shost_priv(shost);

	cb->reset(cb->io_base);
	return SUCCESS;
}

static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_dcdb *dcdb;
	dma_addr_t dcdb_addr;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	int nsge;

	myrb_reset_cmd(cmd_blk);
	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
	if (!dcdb)
		return SCSI_MLQUEUE_HOST_BUSY;
	nsge = scsi_dma_map(scmd);
	if (nsge > 1) {
		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
		scmd->result = (DID_ERROR << 16);
		scsi_done(scmd);
		return 0;
	}

	mbox->type3.opcode = MYRB_CMD_DCDB;
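	/* Offset the block layer tag past the internal MYRB_DCMD_TAG/MYRB_MCMD_TAG ids */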
	mbox->type3.id = rq->tag + 3;
	mbox->type3.addr = dcdb_addr;
	dcdb->channel = sdev->channel;
	dcdb->target = sdev->id;
	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
		break;
	case DMA_TO_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
		break;
	case DMA_FROM_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
		break;
	default:
		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
		break;
	}
	dcdb->early_status = false;
	if (rq->timeout <= 10)
		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
	else if (rq->timeout <= 60)
		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
	else if (rq->timeout <= 600)
		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
	else
		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
	dcdb->no_autosense = false;
	dcdb->allow_disconnect = true;
	sgl = scsi_sglist(scmd);
	dcdb->dma_addr = sg_dma_address(sgl);
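	/* The DCDB transfer length is a 16-bit low part plus a 4-bit high part */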
	if (sg_dma_len(sgl) > USHRT_MAX) {
		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
	} else {
		dcdb->xfer_len_lo = sg_dma_len(sgl);
		dcdb->xfer_len_hi4 = 0;
	}
	dcdb->cdb_len = scmd->cmd_len;
	dcdb->sense_len = sizeof(dcdb->sense);
	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return 0;
}

static void myrb_inquiry(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
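	/*
	 * Static INQUIRY data: direct-access device, vendor "MYLEX";
	 * the model name and firmware revision are patched in below.
	 */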
	unsigned char inq[36] = {
		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20,
	};

	if (cb->bus_width > 16)
		inq[7] |= 1 << 6;
	if (cb->bus_width > 8)
		inq[7] |= 1 << 5;
	memcpy(&inq[16], cb->model_name, 16);
	memcpy(&inq[32], cb->fw_version, 1);
	memcpy(&inq[33], &cb->fw_version[2], 2);
	memcpy(&inq[35], &cb->fw_version[7], 1);

	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
}

static void
myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->size, &block_desc[0]);
		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
	}
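	/* Caching mode page (page code 0x08) with 0x12 bytes of page data */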
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	if (ldev_info->wb_enabled)
		mode_pg[2] |= 0x04;
	if (cb->segment_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}

static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}

static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char data[8];

	dev_dbg(&scmd->device->sdev_gendev,
		"Capacity %u, blocksize %u\n",
		ldev_info->size, cb->ldev_block_size);
	put_unaligned_be32(ldev_info->size - 1, &data[0]);
	put_unaligned_be32(cb->ldev_block_size, &data[4]);
	scsi_sg_copy_from_buffer(scmd, data, 8);
}

static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_ldev_info *ldev_info;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	u64 lba;
	u32 block_cnt;
	int nsge;

	ldev_info = sdev->hostdata;
	if (ldev_info->state != MYRB_DEVICE_ONLINE &&
	    ldev_info->state != MYRB_DEVICE_WO) {
		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
			sdev->id, ldev_info ? ldev_info->state : 0xff);
		scmd->result = (DID_BAD_TARGET << 16);
		scsi_done(scmd);
		return 0;
	}
	switch (scmd->cmnd[0]) {
	case TEST_UNIT_READY:
		scmd->result = (DID_OK << 16);
		scsi_done(scmd);
		return 0;
	case INQUIRY:
		if (scmd->cmnd[1] & 1) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			myrb_inquiry(cb, scmd);
			scmd->result = (DID_OK << 16);
		}
		scsi_done(scmd);
		return 0;
	case SYNCHRONIZE_CACHE:
		scmd->result = (DID_OK << 16);
		scsi_done(scmd);
		return 0;
	case MODE_SENSE:
		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
		    (scmd->cmnd[2] & 0x3F) != 0x08) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			myrb_mode_sense(cb, scmd, ldev_info);
			scmd->result = (DID_OK << 16);
		}
		scsi_done(scmd);
		return 0;
	case READ_CAPACITY:
		if ((scmd->cmnd[1] & 1) ||
		    (scmd->cmnd[8] & 1)) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
			scsi_done(scmd);
			return 0;
		}
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		if (lba) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
			scsi_done(scmd);
			return 0;
		}
		myrb_read_capacity(cb, scmd, ldev_info);
		scsi_done(scmd);
		return 0;
	case REQUEST_SENSE:
		myrb_request_sense(cb, scmd);
		scmd->result = (DID_OK << 16);
		scsi_done(scmd);
		return 0;
	case SEND_DIAGNOSTIC:
		if (scmd->cmnd[1] != 0x04) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			/* Assume good status */
			scmd->result = (DID_OK << 16);
		}
		scsi_done(scmd);
		return 0;
	case READ_6:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_6:
		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
		       (scmd->cmnd[2] << 8) |
		       scmd->cmnd[3]);
		block_cnt = scmd->cmnd[4];
		break;
	case READ_10:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_10:
	case VERIFY:		/* 0x2F */
	case WRITE_VERIFY:	/* 0x2E */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case READ_12:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_12:
	case VERIFY_12: /* 0xAF */
	case WRITE_VERIFY_12:	/* 0xAE */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	default:
		/* Illegal request, invalid opcode */
		scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
		scsi_done(scmd);
		return 0;
	}

	myrb_reset_cmd(cmd_blk);
	mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
	if (scmd->sc_data_direction == DMA_NONE)
		goto submit;
	nsge = scsi_dma_map(scmd);
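	/* A single-element transfer fits in the mailbox; otherwise build a hardware SG list */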
1558 	if (nsge == 1) {
1559 		sgl = scsi_sglist(scmd);
1560 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1561 			mbox->type5.opcode = MYRB_CMD_READ;
1562 		else
1563 			mbox->type5.opcode = MYRB_CMD_WRITE;
1564 
1565 		mbox->type5.ld.xfer_len = block_cnt;
1566 		mbox->type5.ld.ldev_num = sdev->id;
1567 		mbox->type5.lba = lba;
1568 		mbox->type5.addr = (u32)sg_dma_address(sgl);
1569 	} else {
1570 		struct myrb_sge *hw_sgl;
1571 		dma_addr_t hw_sgl_addr;
1572 		int i;
1573 
1574 		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1575 		if (!hw_sgl)
1576 			return SCSI_MLQUEUE_HOST_BUSY;
1577 
1578 		cmd_blk->sgl = hw_sgl;
1579 		cmd_blk->sgl_addr = hw_sgl_addr;
1580 
1581 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1582 			mbox->type5.opcode = MYRB_CMD_READ_SG;
1583 		else
1584 			mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1585 
1586 		mbox->type5.ld.xfer_len = block_cnt;
1587 		mbox->type5.ld.ldev_num = sdev->id;
1588 		mbox->type5.lba = lba;
1589 		mbox->type5.addr = hw_sgl_addr;
1590 		mbox->type5.sg_count = nsge;
1591 
1592 		scsi_for_each_sg(scmd, sgl, nsge, i) {
1593 			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1594 			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
1595 			hw_sgl++;
1596 		}
1597 	}
1598 submit:
1599 	spin_lock_irqsave(&cb->queue_lock, flags);
1600 	cb->qcmd(cb, cmd_blk);
1601 	spin_unlock_irqrestore(&cb->queue_lock, flags);
1602 
1603 	return 0;
1604 }
1605 
myrb_queuecommand(struct Scsi_Host * shost,struct scsi_cmnd * scmd)1606 static int myrb_queuecommand(struct Scsi_Host *shost,
1607 		struct scsi_cmnd *scmd)
1608 {
1609 	struct scsi_device *sdev = scmd->device;
1610 
1611 	if (sdev->channel > myrb_logical_channel(shost)) {
1612 		scmd->result = (DID_BAD_TARGET << 16);
1613 		scsi_done(scmd);
1614 		return 0;
1615 	}
1616 	if (sdev->channel == myrb_logical_channel(shost))
1617 		return myrb_ldev_queuecommand(shost, scmd);
1618 
1619 	return myrb_pthru_queuecommand(shost, scmd);
1620 }
1621 
myrb_ldev_sdev_init(struct scsi_device * sdev)1622 static int myrb_ldev_sdev_init(struct scsi_device *sdev)
1623 {
1624 	struct myrb_hba *cb = shost_priv(sdev->host);
1625 	struct myrb_ldev_info *ldev_info;
1626 	unsigned short ldev_num = sdev->id;
1627 	enum raid_level level;
1628 
1629 	ldev_info = cb->ldev_info_buf + ldev_num;
1630 
1631 	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1632 	if (!sdev->hostdata)
1633 		return -ENOMEM;
1634 	dev_dbg(&sdev->sdev_gendev,
1635 		"slave alloc ldev %d state %x\n",
1636 		ldev_num, ldev_info->state);
1637 	memcpy(sdev->hostdata, ldev_info,
1638 	       sizeof(*ldev_info));
1639 	switch (ldev_info->raid_level) {
1640 	case MYRB_RAID_LEVEL0:
1641 		level = RAID_LEVEL_LINEAR;
1642 		break;
1643 	case MYRB_RAID_LEVEL1:
1644 		level = RAID_LEVEL_1;
1645 		break;
1646 	case MYRB_RAID_LEVEL3:
1647 		level = RAID_LEVEL_3;
1648 		break;
1649 	case MYRB_RAID_LEVEL5:
1650 		level = RAID_LEVEL_5;
1651 		break;
1652 	case MYRB_RAID_LEVEL6:
1653 		level = RAID_LEVEL_6;
1654 		break;
1655 	case MYRB_RAID_JBOD:
1656 		level = RAID_LEVEL_JBOD;
1657 		break;
1658 	default:
1659 		level = RAID_LEVEL_UNKNOWN;
1660 		break;
1661 	}
1662 	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
1663 	return 0;
1664 }
1665 
myrb_pdev_sdev_init(struct scsi_device * sdev)1666 static int myrb_pdev_sdev_init(struct scsi_device *sdev)
1667 {
1668 	struct myrb_hba *cb = shost_priv(sdev->host);
1669 	struct myrb_pdev_state *pdev_info;
1670 	unsigned short status;
1671 
1672 	if (sdev->id > MYRB_MAX_TARGETS)
1673 		return -ENXIO;
1674 
1675 	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
1676 	if (!pdev_info)
1677 		return -ENOMEM;
1678 
1679 	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1680 				  sdev, pdev_info);
1681 	if (status != MYRB_STATUS_SUCCESS) {
1682 		dev_dbg(&sdev->sdev_gendev,
1683 			"Failed to get device state, status %x\n",
1684 			status);
1685 		kfree(pdev_info);
1686 		return -ENXIO;
1687 	}
1688 	if (!pdev_info->present) {
1689 		dev_dbg(&sdev->sdev_gendev,
1690 			"device not present, skip\n");
1691 		kfree(pdev_info);
1692 		return -ENXIO;
1693 	}
1694 	dev_dbg(&sdev->sdev_gendev,
1695 		"slave alloc pdev %d:%d state %x\n",
1696 		sdev->channel, sdev->id, pdev_info->state);
1697 	sdev->hostdata = pdev_info;
1698 
1699 	return 0;
1700 }
1701 
1702 static int myrb_sdev_init(struct scsi_device *sdev)
1703 {
1704 	if (sdev->channel > myrb_logical_channel(sdev->host))
1705 		return -ENXIO;
1706 
1707 	if (sdev->lun > 0)
1708 		return -ENXIO;
1709 
1710 	if (sdev->channel == myrb_logical_channel(sdev->host))
1711 		return myrb_ldev_sdev_init(sdev);
1712 
1713 	return myrb_pdev_sdev_init(sdev);
1714 }
1715 
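/*
 * Physical devices behind the controller are exposed for management
 * purposes only; setting no_uld_attach keeps upper-level drivers (sd,
 * st, ...) from binding to them, since all regular I/O goes through
 * the logical drives.
 */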
1716 static int myrb_sdev_configure(struct scsi_device *sdev,
1717 			       struct queue_limits *lim)
1718 {
1719 	struct myrb_ldev_info *ldev_info;
1720 
1721 	if (sdev->channel > myrb_logical_channel(sdev->host))
1722 		return -ENXIO;
1723 
1724 	if (sdev->channel < myrb_logical_channel(sdev->host)) {
1725 		sdev->no_uld_attach = 1;
1726 		return 0;
1727 	}
1728 	if (sdev->lun != 0)
1729 		return -ENXIO;
1730 
1731 	ldev_info = sdev->hostdata;
1732 	if (!ldev_info)
1733 		return -ENXIO;
1734 	if (ldev_info->state != MYRB_DEVICE_ONLINE)
1735 		sdev_printk(KERN_INFO, sdev,
1736 			    "Logical drive is %s\n",
1737 			    myrb_devstate_name(ldev_info->state));
1738 
1739 	sdev->tagged_supported = 1;
1740 	return 0;
1741 }
1742 
1743 static void myrb_sdev_destroy(struct scsi_device *sdev)
1744 {
1745 	kfree(sdev->hostdata);
1746 }
1747 
1748 static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1749 		sector_t capacity, int geom[])
1750 {
1751 	struct myrb_hba *cb = shost_priv(sdev->host);
1752 
1753 	geom[0] = cb->ldev_geom_heads;
1754 	geom[1] = cb->ldev_geom_sectors;
1755 	geom[2] = sector_div(capacity, geom[0] * geom[1]);
1756 
1757 	return 0;
1758 }
1759 
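/*
 * Per-device sysfs attributes. A typical interaction (the sysfs path
 * is an example and depends on the host/channel/target numbering):
 *
 *   cat /sys/class/scsi_device/2:0:0:0/device/raid_state
 *   echo online > /sys/class/scsi_device/2:0:0:0/device/raid_state
 *
 * raid_state_store() accepts "kill"/"offline", "online" and "standby",
 * and only implements state transitions for physical devices.
 */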
1760 static ssize_t raid_state_show(struct device *dev,
1761 		struct device_attribute *attr, char *buf)
1762 {
1763 	struct scsi_device *sdev = to_scsi_device(dev);
1764 	struct myrb_hba *cb = shost_priv(sdev->host);
1765 	int ret;
1766 
1767 	if (!sdev->hostdata)
1768 		return snprintf(buf, 16, "Unknown\n");
1769 
1770 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1771 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1772 		const char *name;
1773 
1774 		name = myrb_devstate_name(ldev_info->state);
1775 		if (name)
1776 			ret = snprintf(buf, 64, "%s\n", name);
1777 		else
1778 			ret = snprintf(buf, 64, "Invalid (%02X)\n",
1779 				       ldev_info->state);
1780 	} else {
1781 		struct myrb_pdev_state *pdev_info = sdev->hostdata;
1782 		unsigned short status;
1783 		const char *name;
1784 
1785 		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1786 					  sdev, pdev_info);
1787 		if (status != MYRB_STATUS_SUCCESS)
1788 			sdev_printk(KERN_INFO, sdev,
1789 				    "Failed to get device state, status %x\n",
1790 				    status);
1791 
1792 		if (!pdev_info->present)
1793 			name = "Removed";
1794 		else
1795 			name = myrb_devstate_name(pdev_info->state);
1796 		if (name)
1797 			ret = snprintf(buf, 64, "%s\n", name);
1798 		else
1799 			ret = snprintf(buf, 64, "Invalid (%02X)\n",
1800 				       pdev_info->state);
1801 	}
1802 	return ret;
1803 }
1804 
1805 static ssize_t raid_state_store(struct device *dev,
1806 		struct device_attribute *attr, const char *buf, size_t count)
1807 {
1808 	struct scsi_device *sdev = to_scsi_device(dev);
1809 	struct myrb_hba *cb = shost_priv(sdev->host);
1810 	struct myrb_pdev_state *pdev_info;
1811 	enum myrb_devstate new_state;
1812 	unsigned short status;
1813 
1814 	if (!strncmp(buf, "kill", 4) ||
1815 	    !strncmp(buf, "offline", 7))
1816 		new_state = MYRB_DEVICE_DEAD;
1817 	else if (!strncmp(buf, "online", 6))
1818 		new_state = MYRB_DEVICE_ONLINE;
1819 	else if (!strncmp(buf, "standby", 7))
1820 		new_state = MYRB_DEVICE_STANDBY;
1821 	else
1822 		return -EINVAL;
1823 
1824 	pdev_info = sdev->hostdata;
1825 	if (!pdev_info) {
1826 		sdev_printk(KERN_INFO, sdev,
1827 			    "Failed - no physical device information\n");
1828 		return -ENXIO;
1829 	}
1830 	if (!pdev_info->present) {
1831 		sdev_printk(KERN_INFO, sdev,
1832 			    "Failed - device not present\n");
1833 		return -ENXIO;
1834 	}
1835 
1836 	if (pdev_info->state == new_state)
1837 		return count;
1838 
1839 	status = myrb_set_pdev_state(cb, sdev, new_state);
1840 	switch (status) {
1841 	case MYRB_STATUS_SUCCESS:
1842 		break;
1843 	case MYRB_STATUS_START_DEVICE_FAILED:
1844 		sdev_printk(KERN_INFO, sdev,
1845 			     "Failed - Unable to Start Device\n");
1846 		count = -EAGAIN;
1847 		break;
1848 	case MYRB_STATUS_NO_DEVICE:
1849 		sdev_printk(KERN_INFO, sdev,
1850 			    "Failed - No Device at Address\n");
1851 		count = -ENODEV;
1852 		break;
1853 	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1854 		sdev_printk(KERN_INFO, sdev,
1855 			 "Failed - Invalid Channel or Target or Modifier\n");
1856 		count = -EINVAL;
1857 		break;
1858 	case MYRB_STATUS_CHANNEL_BUSY:
1859 		sdev_printk(KERN_INFO, sdev,
1860 			 "Failed - Channel Busy\n");
1861 		count = -EBUSY;
1862 		break;
1863 	default:
1864 		sdev_printk(KERN_INFO, sdev,
1865 			 "Failed - Unexpected Status %04X\n", status);
1866 		count = -EIO;
1867 		break;
1868 	}
1869 	return count;
1870 }
1871 static DEVICE_ATTR_RW(raid_state);
1872 
1873 static ssize_t raid_level_show(struct device *dev,
1874 		struct device_attribute *attr, char *buf)
1875 {
1876 	struct scsi_device *sdev = to_scsi_device(dev);
1877 
1878 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1879 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1880 		const char *name;
1881 
1882 		if (!ldev_info)
1883 			return -ENXIO;
1884 
1885 		name = myrb_raidlevel_name(ldev_info->raid_level);
1886 		if (!name)
1887 			return snprintf(buf, 64, "Invalid (%02X)\n",
1888 					ldev_info->state);
1889 		return snprintf(buf, 64, "%s\n", name);
1890 	}
1891 	return snprintf(buf, 64, "Physical Drive\n");
1892 }
1893 static DEVICE_ATTR_RO(raid_level);
1894 
1895 static ssize_t rebuild_show(struct device *dev,
1896 		struct device_attribute *attr, char *buf)
1897 {
1898 	struct scsi_device *sdev = to_scsi_device(dev);
1899 	struct myrb_hba *cb = shost_priv(sdev->host);
1900 	struct myrb_rbld_progress rbld_buf;
1901 	unsigned char status;
1902 
1903 	if (sdev->channel < myrb_logical_channel(sdev->host))
1904 		return snprintf(buf, 64, "physical device - not rebuilding\n");
1905 
1906 	status = myrb_get_rbld_progress(cb, &rbld_buf);
1907 
1908 	if (status != MYRB_STATUS_SUCCESS ||
1909 	    rbld_buf.ldev_num != sdev->id)
1910 		return snprintf(buf, 64, "not rebuilding\n");
1911 
1912 	return snprintf(buf, 64, "rebuilding block %u of %u\n",
1913 			rbld_buf.ldev_size - rbld_buf.blocks_left,
1914 			rbld_buf.ldev_size);
1915 }
1916 
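/*
 * Writing a non-zero value starts a rebuild for the device, writing 0
 * cancels a rebuild in progress. Cancellation is issued as
 * MYRB_CMD_REBUILD_CONTROL with a rebuild rate of 0xFF.
 */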
1917 static ssize_t rebuild_store(struct device *dev,
1918 		struct device_attribute *attr, const char *buf, size_t count)
1919 {
1920 	struct scsi_device *sdev = to_scsi_device(dev);
1921 	struct myrb_hba *cb = shost_priv(sdev->host);
1922 	struct myrb_cmdblk *cmd_blk;
1923 	union myrb_cmd_mbox *mbox;
1924 	unsigned short status;
1925 	int rc, start;
1926 	const char *msg;
1927 
1928 	rc = kstrtoint(buf, 0, &start);
1929 	if (rc)
1930 		return rc;
1931 
1932 	if (sdev->channel >= myrb_logical_channel(sdev->host))
1933 		return -ENXIO;
1934 
1935 	status = myrb_get_rbld_progress(cb, NULL);
1936 	if (start) {
1937 		if (status == MYRB_STATUS_SUCCESS) {
1938 			sdev_printk(KERN_INFO, sdev,
1939 				    "Rebuild Not Initiated; already in progress\n");
1940 			return -EALREADY;
1941 		}
1942 		mutex_lock(&cb->dcmd_mutex);
1943 		cmd_blk = &cb->dcmd_blk;
1944 		myrb_reset_cmd(cmd_blk);
1945 		mbox = &cmd_blk->mbox;
1946 		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
1947 		mbox->type3D.id = MYRB_DCMD_TAG;
1948 		mbox->type3D.channel = sdev->channel;
1949 		mbox->type3D.target = sdev->id;
1950 		status = myrb_exec_cmd(cb, cmd_blk);
1951 		mutex_unlock(&cb->dcmd_mutex);
1952 	} else {
1953 		struct pci_dev *pdev = cb->pdev;
1954 		unsigned char *rate;
1955 		dma_addr_t rate_addr;
1956 
1957 		if (status != MYRB_STATUS_SUCCESS) {
1958 			sdev_printk(KERN_INFO, sdev,
1959 				    "Rebuild Not Cancelled; not in progress\n");
1960 			return 0;
1961 		}
1962 
1963 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
1964 					  &rate_addr, GFP_KERNEL);
1965 		if (rate == NULL) {
1966 			sdev_printk(KERN_INFO, sdev,
1967 				    "Cancellation of Rebuild Failed - Out of Memory\n");
1968 			return -ENOMEM;
1969 		}
1970 		mutex_lock(&cb->dcmd_mutex);
1971 		cmd_blk = &cb->dcmd_blk;
1972 		myrb_reset_cmd(cmd_blk);
1973 		mbox = &cmd_blk->mbox;
1974 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
1975 		mbox->type3R.id = MYRB_DCMD_TAG;
1976 		mbox->type3R.rbld_rate = 0xFF;
1977 		mbox->type3R.addr = rate_addr;
1978 		status = myrb_exec_cmd(cb, cmd_blk);
1979 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
1980 		mutex_unlock(&cb->dcmd_mutex);
1981 	}
1982 	if (status == MYRB_STATUS_SUCCESS) {
1983 		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
1984 			    start ? "Initiated" : "Cancelled");
1985 		return count;
1986 	}
1987 	if (!start) {
1988 		sdev_printk(KERN_INFO, sdev,
1989 			    "Rebuild Not Cancelled, status 0x%x\n",
1990 			    status);
1991 		return -EIO;
1992 	}
1993 
1994 	switch (status) {
1995 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
1996 		msg = "Attempt to Rebuild Online or Unresponsive Drive";
1997 		break;
1998 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
1999 		msg = "New Disk Failed During Rebuild";
2000 		break;
2001 	case MYRB_STATUS_INVALID_ADDRESS:
2002 		msg = "Invalid Device Address";
2003 		break;
2004 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2005 		msg = "Already in Progress";
2006 		break;
2007 	default:
2008 		msg = NULL;
2009 		break;
2010 	}
2011 	if (msg)
2012 		sdev_printk(KERN_INFO, sdev,
2013 			    "Rebuild Failed - %s\n", msg);
2014 	else
2015 		sdev_printk(KERN_INFO, sdev,
2016 			    "Rebuild Failed, status 0x%x\n", status);
2017 
2018 	return -EIO;
2019 }
2020 static DEVICE_ATTR_RW(rebuild);
2021 
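/*
 * Start (non-zero) or cancel (0) a consistency check on a logical
 * drive. The flow mirrors rebuild_store(): the check is started with a
 * type 3C command, and cancellation reuses MYRB_CMD_REBUILD_CONTROL,
 * which the controller uses for both rebuild and check-consistency
 * rate control.
 */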
2022 static ssize_t consistency_check_store(struct device *dev,
2023 		struct device_attribute *attr, const char *buf, size_t count)
2024 {
2025 	struct scsi_device *sdev = to_scsi_device(dev);
2026 	struct myrb_hba *cb = shost_priv(sdev->host);
2027 	struct myrb_rbld_progress rbld_buf;
2028 	struct myrb_cmdblk *cmd_blk;
2029 	union myrb_cmd_mbox *mbox;
2030 	unsigned short ldev_num = 0xFFFF;
2031 	unsigned short status;
2032 	int rc, start;
2033 	const char *msg;
2034 
2035 	rc = kstrtoint(buf, 0, &start);
2036 	if (rc)
2037 		return rc;
2038 
2039 	if (sdev->channel < myrb_logical_channel(sdev->host))
2040 		return -ENXIO;
2041 
2042 	status = myrb_get_rbld_progress(cb, &rbld_buf);
	/* remember which logical drive the check in progress belongs to */
	if (status == MYRB_STATUS_SUCCESS)
		ldev_num = rbld_buf.ldev_num;
2043 	if (start) {
2044 		if (status == MYRB_STATUS_SUCCESS) {
2045 			sdev_printk(KERN_INFO, sdev,
2046 				    "Check Consistency Not Initiated; already in progress\n");
2047 			return -EALREADY;
2048 		}
2049 		mutex_lock(&cb->dcmd_mutex);
2050 		cmd_blk = &cb->dcmd_blk;
2051 		myrb_reset_cmd(cmd_blk);
2052 		mbox = &cmd_blk->mbox;
2053 		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2054 		mbox->type3C.id = MYRB_DCMD_TAG;
2055 		mbox->type3C.ldev_num = sdev->id;
2056 		mbox->type3C.auto_restore = true;
2057 
2058 		status = myrb_exec_cmd(cb, cmd_blk);
2059 		mutex_unlock(&cb->dcmd_mutex);
2060 	} else {
2061 		struct pci_dev *pdev = cb->pdev;
2062 		unsigned char *rate;
2063 		dma_addr_t rate_addr;
2064 
2065 		if (ldev_num != sdev->id) {
2066 			sdev_printk(KERN_INFO, sdev,
2067 				    "Check Consistency Not Cancelled; not in progress\n");
2068 			return 0;
2069 		}
2070 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2071 					  &rate_addr, GFP_KERNEL);
2072 		if (rate == NULL) {
2073 			sdev_printk(KERN_INFO, sdev,
2074 				    "Cancellation of Check Consistency Failed - Out of Memory\n");
2075 			return -ENOMEM;
2076 		}
2077 		mutex_lock(&cb->dcmd_mutex);
2078 		cmd_blk = &cb->dcmd_blk;
2079 		myrb_reset_cmd(cmd_blk);
2080 		mbox = &cmd_blk->mbox;
2081 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2082 		mbox->type3R.id = MYRB_DCMD_TAG;
2083 		mbox->type3R.rbld_rate = 0xFF;
2084 		mbox->type3R.addr = rate_addr;
2085 		status = myrb_exec_cmd(cb, cmd_blk);
2086 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2087 		mutex_unlock(&cb->dcmd_mutex);
2088 	}
2089 	if (status == MYRB_STATUS_SUCCESS) {
2090 		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2091 			    start ? "Initiated" : "Cancelled");
2092 		return count;
2093 	}
2094 	if (!start) {
2095 		sdev_printk(KERN_INFO, sdev,
2096 			    "Check Consistency Not Cancelled, status 0x%x\n",
2097 			    status);
2098 		return -EIO;
2099 	}
2100 
2101 	switch (status) {
2102 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2103 		msg = "Dependent Physical Device is DEAD";
2104 		break;
2105 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2106 		msg = "New Disk Failed During Rebuild";
2107 		break;
2108 	case MYRB_STATUS_INVALID_ADDRESS:
2109 		msg = "Invalid or Nonredundant Logical Drive";
2110 		break;
2111 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2112 		msg = "Already in Progress";
2113 		break;
2114 	default:
2115 		msg = NULL;
2116 		break;
2117 	}
2118 	if (msg)
2119 		sdev_printk(KERN_INFO, sdev,
2120 			    "Check Consistency Failed - %s\n", msg);
2121 	else
2122 		sdev_printk(KERN_INFO, sdev,
2123 			    "Check Consistency Failed, status 0x%x\n", status);
2124 
2125 	return -EIO;
2126 }
2127 
2128 static ssize_t consistency_check_show(struct device *dev,
2129 		struct device_attribute *attr, char *buf)
2130 {
2131 	return rebuild_show(dev, attr, buf);
2132 }
2133 static DEVICE_ATTR_RW(consistency_check);
2134 
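/*
 * Controller-level (Scsi_Host) attributes: controller number, model,
 * firmware version and a write-only cache flush trigger, e.g. (the
 * host number is an example):
 *
 *   echo 1 > /sys/class/scsi_host/host2/flush_cache
 */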
2135 static ssize_t ctlr_num_show(struct device *dev,
2136 		struct device_attribute *attr, char *buf)
2137 {
2138 	struct Scsi_Host *shost = class_to_shost(dev);
2139 	struct myrb_hba *cb = shost_priv(shost);
2140 
2141 	return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2142 }
2143 static DEVICE_ATTR_RO(ctlr_num);
2144 
2145 static ssize_t firmware_show(struct device *dev,
2146 		struct device_attribute *attr, char *buf)
2147 {
2148 	struct Scsi_Host *shost = class_to_shost(dev);
2149 	struct myrb_hba *cb = shost_priv(shost);
2150 
2151 	return snprintf(buf, 16, "%s\n", cb->fw_version);
2152 }
2153 static DEVICE_ATTR_RO(firmware);
2154 
2155 static ssize_t model_show(struct device *dev,
2156 		struct device_attribute *attr, char *buf)
2157 {
2158 	struct Scsi_Host *shost = class_to_shost(dev);
2159 	struct myrb_hba *cb = shost_priv(shost);
2160 
2161 	return snprintf(buf, 16, "%s\n", cb->model_name);
2162 }
2163 static DEVICE_ATTR_RO(model);
2164 
2165 static ssize_t flush_cache_store(struct device *dev,
2166 		struct device_attribute *attr, const char *buf, size_t count)
2167 {
2168 	struct Scsi_Host *shost = class_to_shost(dev);
2169 	struct myrb_hba *cb = shost_priv(shost);
2170 	unsigned short status;
2171 
2172 	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2173 	if (status == MYRB_STATUS_SUCCESS) {
2174 		shost_printk(KERN_INFO, shost,
2175 			     "Cache Flush Completed\n");
2176 		return count;
2177 	}
2178 	shost_printk(KERN_INFO, shost,
2179 		     "Cache Flush Failed, status %x\n", status);
2180 	return -EIO;
2181 }
2182 static DEVICE_ATTR_WO(flush_cache);
2183 
2184 static struct attribute *myrb_sdev_attrs[] = {
2185 	&dev_attr_rebuild.attr,
2186 	&dev_attr_consistency_check.attr,
2187 	&dev_attr_raid_state.attr,
2188 	&dev_attr_raid_level.attr,
2189 	NULL,
2190 };
2191 
2192 ATTRIBUTE_GROUPS(myrb_sdev);
2193 
2194 static struct attribute *myrb_shost_attrs[] = {
2195 	&dev_attr_ctlr_num.attr,
2196 	&dev_attr_model.attr,
2197 	&dev_attr_firmware.attr,
2198 	&dev_attr_flush_cache.attr,
2199 	NULL,
2200 };
2201 
2202 ATTRIBUTE_GROUPS(myrb_shost);
2203 
2204 static const struct scsi_host_template myrb_template = {
2205 	.module			= THIS_MODULE,
2206 	.name			= "DAC960",
2207 	.proc_name		= "myrb",
2208 	.queuecommand		= myrb_queuecommand,
2209 	.eh_host_reset_handler	= myrb_host_reset,
2210 	.sdev_init		= myrb_sdev_init,
2211 	.sdev_configure		= myrb_sdev_configure,
2212 	.sdev_destroy		= myrb_sdev_destroy,
2213 	.bios_param		= myrb_biosparam,
2214 	.cmd_size		= sizeof(struct myrb_cmdblk),
2215 	.shost_groups		= myrb_shost_groups,
2216 	.sdev_groups		= myrb_sdev_groups,
2217 	.this_id		= -1,
2218 };
2219 
2220 /**
2221  * myrb_is_raid - return boolean indicating device is raid volume
2222  * @dev: the device struct object
2223  */
2224 static int myrb_is_raid(struct device *dev)
2225 {
2226 	struct scsi_device *sdev = to_scsi_device(dev);
2227 
2228 	return sdev->channel == myrb_logical_channel(sdev->host);
2229 }
2230 
2231 /**
2232  * myrb_get_resync - get raid volume resync percent complete
2233  * @dev: the device struct object
2234  */
2235 static void myrb_get_resync(struct device *dev)
2236 {
2237 	struct scsi_device *sdev = to_scsi_device(dev);
2238 	struct myrb_hba *cb = shost_priv(sdev->host);
2239 	struct myrb_rbld_progress rbld_buf;
2240 	unsigned int percent_complete = 0;
2241 	unsigned short status;
2242 	unsigned int ldev_size = 0, remaining = 0;
2243 
2244 	if (sdev->channel < myrb_logical_channel(sdev->host))
2245 		return;
2246 	status = myrb_get_rbld_progress(cb, &rbld_buf);
2247 	if (status == MYRB_STATUS_SUCCESS) {
2248 		if (rbld_buf.ldev_num == sdev->id) {
2249 			ldev_size = rbld_buf.ldev_size;
2250 			remaining = rbld_buf.blocks_left;
2251 		}
2252 	}
2253 	if (remaining && ldev_size)
2254 		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2255 	raid_set_resync(myrb_raid_template, dev, percent_complete);
2256 }
2257 
2258 /**
2259  * myrb_get_state - get raid volume status
2260  * @dev: the device struct object
2261  */
2262 static void myrb_get_state(struct device *dev)
2263 {
2264 	struct scsi_device *sdev = to_scsi_device(dev);
2265 	struct myrb_hba *cb = shost_priv(sdev->host);
2266 	struct myrb_ldev_info *ldev_info = sdev->hostdata;
2267 	enum raid_state state = RAID_STATE_UNKNOWN;
2268 	unsigned short status;
2269 
2270 	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2271 		state = RAID_STATE_UNKNOWN;
2272 	else {
2273 		status = myrb_get_rbld_progress(cb, NULL);
2274 		if (status == MYRB_STATUS_SUCCESS)
2275 			state = RAID_STATE_RESYNCING;
2276 		else {
2277 			switch (ldev_info->state) {
2278 			case MYRB_DEVICE_ONLINE:
2279 				state = RAID_STATE_ACTIVE;
2280 				break;
2281 			case MYRB_DEVICE_WO:
2282 			case MYRB_DEVICE_CRITICAL:
2283 				state = RAID_STATE_DEGRADED;
2284 				break;
2285 			default:
2286 				state = RAID_STATE_OFFLINE;
2287 			}
2288 		}
2289 	}
2290 	raid_set_state(myrb_raid_template, dev, state);
2291 }
2292 
2293 static struct raid_function_template myrb_raid_functions = {
2294 	.cookie		= &myrb_template,
2295 	.is_raid	= myrb_is_raid,
2296 	.get_resync	= myrb_get_resync,
2297 	.get_state	= myrb_get_state,
2298 };
2299 
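/*
 * Completion handling for SCSI commands: unmap DMA, return any DCDB
 * and scatter/gather allocations to their pools, then translate the
 * controller status into a SCSI result, building MEDIUM_ERROR sense
 * data with the ASC/ASCQ codes noted inline for media errors.
 */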
2300 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2301 		struct scsi_cmnd *scmd)
2302 {
2303 	unsigned short status;
2304 
2305 	if (!cmd_blk)
2306 		return;
2307 
2308 	scsi_dma_unmap(scmd);
2309 
2310 	if (cmd_blk->dcdb) {
2311 		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2312 		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2313 			      cmd_blk->dcdb_addr);
2314 		cmd_blk->dcdb = NULL;
2315 	}
2316 	if (cmd_blk->sgl) {
2317 		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2318 		cmd_blk->sgl = NULL;
2319 		cmd_blk->sgl_addr = 0;
2320 	}
2321 	status = cmd_blk->status;
2322 	switch (status) {
2323 	case MYRB_STATUS_SUCCESS:
2324 	case MYRB_STATUS_DEVICE_BUSY:
2325 		scmd->result = (DID_OK << 16) | status;
2326 		break;
2327 	case MYRB_STATUS_BAD_DATA:
2328 		dev_dbg(&scmd->device->sdev_gendev,
2329 			"Bad Data Encountered\n");
2330 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2331 			/* Unrecovered read error */
2332 			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
2333 		else
2334 			/* Write error */
2335 			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
2336 		break;
2337 	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2338 		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2339 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2340 			/* Unrecovered read error, auto-reallocation failed */
2341 			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
2342 		else
2343 			/* Write error, auto-reallocation failed */
2344 			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
2345 		break;
2346 	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2347 		dev_dbg(&scmd->device->sdev_gendev,
2348 			    "Logical Drive Nonexistent or Offline\n");
2349 		scmd->result = (DID_BAD_TARGET << 16);
2350 		break;
2351 	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2352 		dev_dbg(&scmd->device->sdev_gendev,
2353 			    "Attempt to Access Beyond End of Logical Drive\n");
2354 		/* Logical block address out of range */
2355 		scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0);
2356 		break;
2357 	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2358 		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2359 		scmd->result = (DID_BAD_TARGET << 16);
2360 		break;
2361 	default:
2362 		scmd_printk(KERN_ERR, scmd,
2363 			    "Unexpected Error Status %04X\n", status);
2364 		scmd->result = (DID_ERROR << 16);
2365 		break;
2366 	}
2367 	scsi_done(scmd);
2368 }
2369 
2370 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2371 {
2372 	if (!cmd_blk)
2373 		return;
2374 
2375 	if (cmd_blk->completion) {
2376 		complete(cmd_blk->completion);
2377 		cmd_blk->completion = NULL;
2378 	}
2379 }
2380 
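/*
 * Background monitor, run from a delayed workqueue. Pending items
 * (event log, error table, rebuild/consistency-check progress, logical
 * drive info, background initialisation status) are drained one per
 * invocation at a fast 10-jiffies interval; once everything is idle a
 * new enquiry is issued and the work is rescheduled at the slow
 * MYRB_PRIMARY_MONITOR_INTERVAL.
 */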
2381 static void myrb_monitor(struct work_struct *work)
2382 {
2383 	struct myrb_hba *cb = container_of(work,
2384 			struct myrb_hba, monitor_work.work);
2385 	struct Scsi_Host *shost = cb->host;
2386 	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
2387 
2388 	dev_dbg(&shost->shost_gendev, "monitor tick\n");
2389 
2390 	if (cb->new_ev_seq > cb->old_ev_seq) {
2391 		int event = cb->old_ev_seq;
2392 
2393 		dev_dbg(&shost->shost_gendev,
2394 			"get event log no %d/%d\n",
2395 			cb->new_ev_seq, event);
2396 		myrb_get_event(cb, event);
2397 		cb->old_ev_seq = event + 1;
2398 		interval = 10;
2399 	} else if (cb->need_err_info) {
2400 		cb->need_err_info = false;
2401 		dev_dbg(&shost->shost_gendev, "get error table\n");
2402 		myrb_get_errtable(cb);
2403 		interval = 10;
2404 	} else if (cb->need_rbld && cb->rbld_first) {
2405 		cb->need_rbld = false;
2406 		dev_dbg(&shost->shost_gendev,
2407 			"get rebuild progress\n");
2408 		myrb_update_rbld_progress(cb);
2409 		interval = 10;
2410 	} else if (cb->need_ldev_info) {
2411 		cb->need_ldev_info = false;
2412 		dev_dbg(&shost->shost_gendev,
2413 			"get logical drive info\n");
2414 		myrb_get_ldev_info(cb);
2415 		interval = 10;
2416 	} else if (cb->need_rbld) {
2417 		cb->need_rbld = false;
2418 		dev_dbg(&shost->shost_gendev,
2419 			"get rebuild progress\n");
2420 		myrb_update_rbld_progress(cb);
2421 		interval = 10;
2422 	} else if (cb->need_cc_status) {
2423 		cb->need_cc_status = false;
2424 		dev_dbg(&shost->shost_gendev,
2425 			"get consistency check progress\n");
2426 		myrb_get_cc_progress(cb);
2427 		interval = 10;
2428 	} else if (cb->need_bgi_status) {
2429 		cb->need_bgi_status = false;
2430 		dev_dbg(&shost->shost_gendev, "get background init status\n");
2431 		myrb_bgi_control(cb);
2432 		interval = 10;
2433 	} else {
2434 		dev_dbg(&shost->shost_gendev, "new enquiry\n");
2435 		mutex_lock(&cb->dma_mutex);
2436 		myrb_hba_enquiry(cb);
2437 		mutex_unlock(&cb->dma_mutex);
2438 		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2439 		    cb->need_err_info || cb->need_rbld ||
2440 		    cb->need_ldev_info || cb->need_cc_status ||
2441 		    cb->need_bgi_status) {
2442 			dev_dbg(&shost->shost_gendev,
2443 				"reschedule monitor\n");
2444 			interval = 0;
2445 		}
2446 	}
2447 	if (interval > 1)
2448 		cb->primary_monitor_time = jiffies;
2449 	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
2450 }
2451 
2452 /*
2453  * myrb_err_status - reports controller BIOS messages
2454  *
2455  * Controller BIOS messages are passed through the Error Status Register
2456  * when the driver performs the BIOS handshaking.
2457  *
2458  * Return: true for fatal errors and false otherwise.
2459  */
2460 static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2461 		unsigned char parm0, unsigned char parm1)
2462 {
2463 	struct pci_dev *pdev = cb->pdev;
2464 
2465 	switch (error) {
2466 	case 0x00:
2467 		dev_info(&pdev->dev,
2468 			 "Physical Device %d:%d Not Responding\n",
2469 			 parm1, parm0);
2470 		break;
2471 	case 0x08:
2472 		dev_notice(&pdev->dev, "Spinning Up Drives\n");
2473 		break;
2474 	case 0x30:
2475 		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2476 		break;
2477 	case 0x60:
2478 		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2479 		break;
2480 	case 0x70:
2481 		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2482 		break;
2483 	case 0x90:
2484 		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2485 			   parm1, parm0);
2486 		break;
2487 	case 0xA0:
2488 		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2489 		break;
2490 	case 0xB0:
2491 		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2492 		break;
2493 	case 0xD0:
2494 		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2495 		break;
2496 	case 0xF0:
2497 		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2498 		return true;
2499 	default:
2500 		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2501 			error);
2502 		return true;
2503 	}
2504 	return false;
2505 }
2506 
2507 /*
2508  * Hardware-specific functions
2509  */
2510 
2511 /*
2512  * DAC960 LA Series Controllers
2513  */
2514 
2515 static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
2516 {
2517 	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2518 }
2519 
2520 static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
2521 {
2522 	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
2523 }
2524 
2525 static inline void DAC960_LA_reset_ctrl(void __iomem *base)
2526 {
2527 	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
2528 }
2529 
2530 static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
2531 {
2532 	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2533 }
2534 
2535 static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
2536 {
2537 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2538 
2539 	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
2540 }
2541 
2542 static inline bool DAC960_LA_init_in_progress(void __iomem *base)
2543 {
2544 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2545 
2546 	return !(idb & DAC960_LA_IDB_INIT_DONE);
2547 }
2548 
2549 static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
2550 {
2551 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2552 }
2553 
2554 static inline void DAC960_LA_ack_intr(void __iomem *base)
2555 {
2556 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
2557 	       base + DAC960_LA_ODB_OFFSET);
2558 }
2559 
2560 static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
2561 {
2562 	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2563 
2564 	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
2565 }
2566 
2567 static inline void DAC960_LA_enable_intr(void __iomem *base)
2568 {
2569 	unsigned char odb = 0xFF;
2570 
2571 	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
2572 	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
2573 }
2574 
2575 static inline void DAC960_LA_disable_intr(void __iomem *base)
2576 {
2577 	unsigned char odb = 0xFF;
2578 
2579 	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
2580 	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
2581 }
2582 
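/*
 * Memory mailbox write ordering: words 1-3 carry the command payload
 * and are written first; word 0 holds the opcode and command id that
 * mark the slot as valid, so it is written last behind a wmb() to keep
 * the controller from picking up a half-written command. The final
 * mb() forces the writes out to the DMA-visible mailbox memory.
 */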
2583 static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2584 		union myrb_cmd_mbox *mbox)
2585 {
2586 	mem_mbox->words[1] = mbox->words[1];
2587 	mem_mbox->words[2] = mbox->words[2];
2588 	mem_mbox->words[3] = mbox->words[3];
2589 	/* Memory barrier to prevent reordering */
2590 	wmb();
2591 	mem_mbox->words[0] = mbox->words[0];
2592 	/* Memory barrier to force PCI access */
2593 	mb();
2594 }
2595 
2596 static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
2597 		union myrb_cmd_mbox *mbox)
2598 {
2599 	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
2600 	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
2601 	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
2602 	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
2603 }
2604 
2605 static inline unsigned short DAC960_LA_read_status(void __iomem *base)
2606 {
2607 	return readw(base + DAC960_LA_STS_OFFSET);
2608 }
2609 
2610 static inline bool
2611 DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
2612 		unsigned char *param0, unsigned char *param1)
2613 {
2614 	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
2615 
2616 	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
2617 		return false;
2618 	errsts &= ~DAC960_LA_ERRSTS_PENDING;
2619 
2620 	*error = errsts;
2621 	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
2622 	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
2623 	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
2624 	return true;
2625 }
2626 
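/*
 * Hardware mailbox handshake used before the memory mailbox interface
 * is up: wait for the mailbox to drain, write the command, ring the
 * new-command doorbell, poll for a status, then acknowledge both the
 * interrupt and the status. Polling runs in 10us steps bounded by
 * MYRB_MAILBOX_TIMEOUT iterations.
 */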
2627 static inline unsigned short
2628 DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2629 		union myrb_cmd_mbox *mbox)
2630 {
2631 	unsigned short status;
2632 	int timeout = 0;
2633 
2634 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2635 		if (!DAC960_LA_hw_mbox_is_full(base))
2636 			break;
2637 		udelay(10);
2638 		timeout++;
2639 	}
2640 	if (DAC960_LA_hw_mbox_is_full(base)) {
2641 		dev_err(&pdev->dev,
2642 			"Timeout waiting for empty mailbox\n");
2643 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2644 	}
2645 	DAC960_LA_write_hw_mbox(base, mbox);
2646 	DAC960_LA_hw_mbox_new_cmd(base);
2647 	timeout = 0;
2648 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2649 		if (DAC960_LA_hw_mbox_status_available(base))
2650 			break;
2651 		udelay(10);
2652 		timeout++;
2653 	}
2654 	if (!DAC960_LA_hw_mbox_status_available(base)) {
2655 		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2656 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2657 	}
2658 	status = DAC960_LA_read_status(base);
2659 	DAC960_LA_ack_hw_mbox_intr(base);
2660 	DAC960_LA_ack_hw_mbox_status(base);
2661 
2662 	return status;
2663 }
2664 
2665 static int DAC960_LA_hw_init(struct pci_dev *pdev,
2666 		struct myrb_hba *cb, void __iomem *base)
2667 {
2668 	int timeout = 0;
2669 	unsigned char error, parm0, parm1;
2670 
2671 	DAC960_LA_disable_intr(base);
2672 	DAC960_LA_ack_hw_mbox_status(base);
2673 	udelay(1000);
2674 	while (DAC960_LA_init_in_progress(base) &&
2675 	       timeout < MYRB_MAILBOX_TIMEOUT) {
2676 		if (DAC960_LA_read_error_status(base, &error,
2677 					      &parm0, &parm1) &&
2678 		    myrb_err_status(cb, error, parm0, parm1))
2679 			return -ENODEV;
2680 		udelay(10);
2681 		timeout++;
2682 	}
2683 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
2684 		dev_err(&pdev->dev,
2685 			"Timeout waiting for Controller Initialisation\n");
2686 		return -ETIMEDOUT;
2687 	}
2688 	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2689 		dev_err(&pdev->dev,
2690 			"Unable to Enable Memory Mailbox Interface\n");
2691 		DAC960_LA_reset_ctrl(base);
2692 		return -ENODEV;
2693 	}
2694 	DAC960_LA_enable_intr(base);
2695 	cb->qcmd = myrb_qcmd;
2696 	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
2697 	if (cb->dual_mode_interface)
2698 		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2699 	else
2700 		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2701 	cb->disable_intr = DAC960_LA_disable_intr;
2702 	cb->reset = DAC960_LA_reset_ctrl;
2703 
2704 	return 0;
2705 }
2706 
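/*
 * Interrupt handling for LA controllers: completions arrive through a
 * ring of status mailboxes. Each valid entry carries a command id and
 * status; the entry is cleared and the cursor wraps from
 * last_stat_mbox back to first_stat_mbox. Ids below 3 complete
 * driver-internal command blocks, everything else completes a tagged
 * SCSI command. (The PG handler below follows the same scheme; PD and
 * P controllers read the id and status straight from I/O registers.)
 */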
2707 static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
2708 {
2709 	struct myrb_hba *cb = arg;
2710 	void __iomem *base = cb->io_base;
2711 	struct myrb_stat_mbox *next_stat_mbox;
2712 	unsigned long flags;
2713 
2714 	spin_lock_irqsave(&cb->queue_lock, flags);
2715 	DAC960_LA_ack_intr(base);
2716 	next_stat_mbox = cb->next_stat_mbox;
2717 	while (next_stat_mbox->valid) {
2718 		unsigned char id = next_stat_mbox->id;
2719 		struct scsi_cmnd *scmd = NULL;
2720 		struct myrb_cmdblk *cmd_blk = NULL;
2721 
2722 		if (id == MYRB_DCMD_TAG)
2723 			cmd_blk = &cb->dcmd_blk;
2724 		else if (id == MYRB_MCMD_TAG)
2725 			cmd_blk = &cb->mcmd_blk;
2726 		else {
2727 			scmd = scsi_host_find_tag(cb->host, id - 3);
2728 			if (scmd)
2729 				cmd_blk = scsi_cmd_priv(scmd);
2730 		}
2731 		if (cmd_blk)
2732 			cmd_blk->status = next_stat_mbox->status;
2733 		else
2734 			dev_err(&cb->pdev->dev,
2735 				"Unhandled command completion %d\n", id);
2736 
2737 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2738 		if (++next_stat_mbox > cb->last_stat_mbox)
2739 			next_stat_mbox = cb->first_stat_mbox;
2740 
2741 		if (cmd_blk) {
2742 			if (id < 3)
2743 				myrb_handle_cmdblk(cb, cmd_blk);
2744 			else
2745 				myrb_handle_scsi(cb, cmd_blk, scmd);
2746 		}
2747 	}
2748 	cb->next_stat_mbox = next_stat_mbox;
2749 	spin_unlock_irqrestore(&cb->queue_lock, flags);
2750 	return IRQ_HANDLED;
2751 }
2752 
2753 static struct myrb_privdata DAC960_LA_privdata = {
2754 	.hw_init =	DAC960_LA_hw_init,
2755 	.irq_handler =	DAC960_LA_intr_handler,
2756 	.mmio_size =	DAC960_LA_mmio_size,
2757 };
2758 
2759 /*
2760  * DAC960 PG Series Controllers
2761  */
2762 static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
2763 {
2764 	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2765 }
2766 
2767 static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
2768 {
2769 	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
2770 }
2771 
2772 static inline void DAC960_PG_reset_ctrl(void __iomem *base)
2773 {
2774 	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
2775 }
2776 
2777 static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
2778 {
2779 	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2780 }
2781 
2782 static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
2783 {
2784 	unsigned int idb = readl(base + DAC960_PG_IDB_OFFSET);
2785 
2786 	return idb & DAC960_PG_IDB_HWMBOX_FULL;
2787 }
2788 
2789 static inline bool DAC960_PG_init_in_progress(void __iomem *base)
2790 {
2791 	unsigned int idb = readl(base + DAC960_PG_IDB_OFFSET);
2792 
2793 	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
2794 }
2795 
2796 static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
2797 {
2798 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2799 }
2800 
2801 static inline void DAC960_PG_ack_intr(void __iomem *base)
2802 {
2803 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
2804 	       base + DAC960_PG_ODB_OFFSET);
2805 }
2806 
2807 static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
2808 {
2809 	unsigned int odb = readl(base + DAC960_PG_ODB_OFFSET);
2810 
2811 	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
2812 }
2813 
2814 static inline void DAC960_PG_enable_intr(void __iomem *base)
2815 {
2816 	unsigned int imask = (unsigned int)-1;
2817 
2818 	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
2819 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2820 }
2821 
2822 static inline void DAC960_PG_disable_intr(void __iomem *base)
2823 {
2824 	unsigned int imask = (unsigned int)-1;
2825 
2826 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2827 }
2828 
2829 static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2830 		union myrb_cmd_mbox *mbox)
2831 {
2832 	mem_mbox->words[1] = mbox->words[1];
2833 	mem_mbox->words[2] = mbox->words[2];
2834 	mem_mbox->words[3] = mbox->words[3];
2835 	/* Memory barrier to prevent reordering */
2836 	wmb();
2837 	mem_mbox->words[0] = mbox->words[0];
2838 	/* Memory barrier to force PCI access */
2839 	mb();
2840 }
2841 
2842 static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
2843 		union myrb_cmd_mbox *mbox)
2844 {
2845 	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
2846 	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
2847 	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
2848 	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
2849 }
2850 
2851 static inline unsigned short
2852 DAC960_PG_read_status(void __iomem *base)
2853 {
2854 	return readw(base + DAC960_PG_STS_OFFSET);
2855 }
2856 
2857 static inline bool
2858 DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
2859 		unsigned char *param0, unsigned char *param1)
2860 {
2861 	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
2862 
2863 	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
2864 		return false;
2865 	errsts &= ~DAC960_PG_ERRSTS_PENDING;
2866 	*error = errsts;
2867 	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
2868 	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
2869 	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
2870 	return true;
2871 }
2872 
2873 static inline unsigned short
2874 DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2875 		union myrb_cmd_mbox *mbox)
2876 {
2877 	unsigned short status;
2878 	int timeout = 0;
2879 
2880 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2881 		if (!DAC960_PG_hw_mbox_is_full(base))
2882 			break;
2883 		udelay(10);
2884 		timeout++;
2885 	}
2886 	if (DAC960_PG_hw_mbox_is_full(base)) {
2887 		dev_err(&pdev->dev,
2888 			"Timeout waiting for empty mailbox\n");
2889 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2890 	}
2891 	DAC960_PG_write_hw_mbox(base, mbox);
2892 	DAC960_PG_hw_mbox_new_cmd(base);
2893 
2894 	timeout = 0;
2895 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2896 		if (DAC960_PG_hw_mbox_status_available(base))
2897 			break;
2898 		udelay(10);
2899 		timeout++;
2900 	}
2901 	if (!DAC960_PG_hw_mbox_status_available(base)) {
2902 		dev_err(&pdev->dev,
2903 			"Timeout waiting for mailbox status\n");
2904 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2905 	}
2906 	status = DAC960_PG_read_status(base);
2907 	DAC960_PG_ack_hw_mbox_intr(base);
2908 	DAC960_PG_ack_hw_mbox_status(base);
2909 
2910 	return status;
2911 }
2912 
2913 static int DAC960_PG_hw_init(struct pci_dev *pdev,
2914 		struct myrb_hba *cb, void __iomem *base)
2915 {
2916 	int timeout = 0;
2917 	unsigned char error, parm0, parm1;
2918 
2919 	DAC960_PG_disable_intr(base);
2920 	DAC960_PG_ack_hw_mbox_status(base);
2921 	udelay(1000);
2922 	while (DAC960_PG_init_in_progress(base) &&
2923 	       timeout < MYRB_MAILBOX_TIMEOUT) {
2924 		if (DAC960_PG_read_error_status(base, &error,
2925 						&parm0, &parm1) &&
2926 		    myrb_err_status(cb, error, parm0, parm1))
2927 			return -EIO;
2928 		udelay(10);
2929 		timeout++;
2930 	}
2931 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
2932 		dev_err(&pdev->dev,
2933 			"Timeout waiting for Controller Initialisation\n");
2934 		return -ETIMEDOUT;
2935 	}
2936 	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
2937 		dev_err(&pdev->dev,
2938 			"Unable to Enable Memory Mailbox Interface\n");
2939 		DAC960_PG_reset_ctrl(base);
2940 		return -ENODEV;
2941 	}
2942 	DAC960_PG_enable_intr(base);
2943 	cb->qcmd = myrb_qcmd;
2944 	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
2945 	if (cb->dual_mode_interface)
2946 		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
2947 	else
2948 		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
2949 	cb->disable_intr = DAC960_PG_disable_intr;
2950 	cb->reset = DAC960_PG_reset_ctrl;
2951 
2952 	return 0;
2953 }
2954 
2955 static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
2956 {
2957 	struct myrb_hba *cb = arg;
2958 	void __iomem *base = cb->io_base;
2959 	struct myrb_stat_mbox *next_stat_mbox;
2960 	unsigned long flags;
2961 
2962 	spin_lock_irqsave(&cb->queue_lock, flags);
2963 	DAC960_PG_ack_intr(base);
2964 	next_stat_mbox = cb->next_stat_mbox;
2965 	while (next_stat_mbox->valid) {
2966 		unsigned char id = next_stat_mbox->id;
2967 		struct scsi_cmnd *scmd = NULL;
2968 		struct myrb_cmdblk *cmd_blk = NULL;
2969 
2970 		if (id == MYRB_DCMD_TAG)
2971 			cmd_blk = &cb->dcmd_blk;
2972 		else if (id == MYRB_MCMD_TAG)
2973 			cmd_blk = &cb->mcmd_blk;
2974 		else {
2975 			scmd = scsi_host_find_tag(cb->host, id - 3);
2976 			if (scmd)
2977 				cmd_blk = scsi_cmd_priv(scmd);
2978 		}
2979 		if (cmd_blk)
2980 			cmd_blk->status = next_stat_mbox->status;
2981 		else
2982 			dev_err(&cb->pdev->dev,
2983 				"Unhandled command completion %d\n", id);
2984 
2985 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2986 		if (++next_stat_mbox > cb->last_stat_mbox)
2987 			next_stat_mbox = cb->first_stat_mbox;
2988 
2989 		if (id < 3)
2990 			myrb_handle_cmdblk(cb, cmd_blk);
2991 		else
2992 			myrb_handle_scsi(cb, cmd_blk, scmd);
2993 	}
2994 	cb->next_stat_mbox = next_stat_mbox;
2995 	spin_unlock_irqrestore(&cb->queue_lock, flags);
2996 	return IRQ_HANDLED;
2997 }
2998 
2999 static struct myrb_privdata DAC960_PG_privdata = {
3000 	.hw_init =	DAC960_PG_hw_init,
3001 	.irq_handler =	DAC960_PG_intr_handler,
3002 	.mmio_size =	DAC960_PG_mmio_size,
3003 };
3004 
3006 /*
3007  * DAC960 PD Series Controllers
3008  */
3009 
3010 static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
3011 {
3012 	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
3013 }
3014 
3015 static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
3016 {
3017 	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
3018 }
3019 
3020 static inline void DAC960_PD_reset_ctrl(void __iomem *base)
3021 {
3022 	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
3023 }
3024 
3025 static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3026 {
3027 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3028 
3029 	return idb & DAC960_PD_IDB_HWMBOX_FULL;
3030 }
3031 
3032 static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3033 {
3034 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3035 
3036 	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3037 }
3038 
3039 static inline void DAC960_PD_ack_intr(void __iomem *base)
3040 {
3041 	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
3042 }
3043 
3044 static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3045 {
3046 	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3047 
3048 	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3049 }
3050 
3051 static inline void DAC960_PD_enable_intr(void __iomem *base)
3052 {
3053 	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
3054 }
3055 
3056 static inline void DAC960_PD_disable_intr(void __iomem *base)
3057 {
3058 	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
3059 }
3060 
3061 static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
3062 		union myrb_cmd_mbox *mbox)
3063 {
3064 	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
3065 	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
3066 	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
3067 	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
3068 }
3069 
3070 static inline unsigned char
3071 DAC960_PD_read_status_cmd_ident(void __iomem *base)
3072 {
3073 	return readb(base + DAC960_PD_STSID_OFFSET);
3074 }
3075 
3076 static inline unsigned short
3077 DAC960_PD_read_status(void __iomem *base)
3078 {
3079 	return readw(base + DAC960_PD_STS_OFFSET);
3080 }
3081 
3082 static inline bool
3083 DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
3084 		unsigned char *param0, unsigned char *param1)
3085 {
3086 	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
3087 
3088 	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
3089 		return false;
3090 	errsts &= ~DAC960_PD_ERRSTS_PENDING;
3091 	*error = errsts;
3092 	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
3093 	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
3094 	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
3095 	return true;
3096 }
3097 
3098 static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3099 {
3100 	void __iomem *base = cb->io_base;
3101 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3102 
3103 	while (DAC960_PD_hw_mbox_is_full(base))
3104 		udelay(1);
3105 	DAC960_PD_write_cmd_mbox(base, mbox);
3106 	DAC960_PD_hw_mbox_new_cmd(base);
3107 }
3108 
3109 static int DAC960_PD_hw_init(struct pci_dev *pdev,
3110 		struct myrb_hba *cb, void __iomem *base)
3111 {
3112 	int timeout = 0;
3113 	unsigned char error, parm0, parm1;
3114 
3115 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3116 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3117 			(unsigned long)cb->io_addr);
3118 		return -EBUSY;
3119 	}
3120 	DAC960_PD_disable_intr(base);
3121 	DAC960_PD_ack_hw_mbox_status(base);
3122 	udelay(1000);
3123 	while (DAC960_PD_init_in_progress(base) &&
3124 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3125 		if (DAC960_PD_read_error_status(base, &error,
3126 					      &parm0, &parm1) &&
3127 		    myrb_err_status(cb, error, parm0, parm1))
3128 			return -EIO;
3129 		udelay(10);
3130 		timeout++;
3131 	}
3132 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3133 		dev_err(&pdev->dev,
3134 			"Timeout waiting for Controller Initialisation\n");
3135 		return -ETIMEDOUT;
3136 	}
3137 	if (!myrb_enable_mmio(cb, NULL)) {
3138 		dev_err(&pdev->dev,
3139 			"Unable to Enable Memory Mailbox Interface\n");
3140 		DAC960_PD_reset_ctrl(base);
3141 		return -ENODEV;
3142 	}
3143 	DAC960_PD_enable_intr(base);
3144 	cb->qcmd = DAC960_PD_qcmd;
3145 	cb->disable_intr = DAC960_PD_disable_intr;
3146 	cb->reset = DAC960_PD_reset_ctrl;
3147 
3148 	return 0;
3149 }
3150 
3151 static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
3152 {
3153 	struct myrb_hba *cb = arg;
3154 	void __iomem *base = cb->io_base;
3155 	unsigned long flags;
3156 
3157 	spin_lock_irqsave(&cb->queue_lock, flags);
3158 	while (DAC960_PD_hw_mbox_status_available(base)) {
3159 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3160 		struct scsi_cmnd *scmd = NULL;
3161 		struct myrb_cmdblk *cmd_blk = NULL;
3162 
3163 		if (id == MYRB_DCMD_TAG)
3164 			cmd_blk = &cb->dcmd_blk;
3165 		else if (id == MYRB_MCMD_TAG)
3166 			cmd_blk = &cb->mcmd_blk;
3167 		else {
3168 			scmd = scsi_host_find_tag(cb->host, id - 3);
3169 			if (scmd)
3170 				cmd_blk = scsi_cmd_priv(scmd);
3171 		}
3172 		if (cmd_blk)
3173 			cmd_blk->status = DAC960_PD_read_status(base);
3174 		else
3175 			dev_err(&cb->pdev->dev,
3176 				"Unhandled command completion %d\n", id);
3177 
3178 		DAC960_PD_ack_intr(base);
3179 		DAC960_PD_ack_hw_mbox_status(base);
3180 
3181 		if (id < 3)
3182 			myrb_handle_cmdblk(cb, cmd_blk);
3183 		else
3184 			myrb_handle_scsi(cb, cmd_blk, scmd);
3185 	}
3186 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3187 	return IRQ_HANDLED;
3188 }
3189 
3190 static struct myrb_privdata DAC960_PD_privdata = {
3191 	.hw_init =	DAC960_PD_hw_init,
3192 	.irq_handler =	DAC960_PD_intr_handler,
3193 	.mmio_size =	DAC960_PD_mmio_size,
3194 };
3195 
3197 /*
3198  * DAC960 P Series Controllers
3199  *
3200  * Similar to the DAC960 PD Series Controllers, but some commands have
3201  * to be translated.
3202  */
3203 
3204 static inline void myrb_translate_enquiry(void *enq)
3205 {
3206 	memcpy(enq + 132, enq + 36, 64);
3207 	memset(enq + 36, 0, 96);
3208 }
3209 
3210 static inline void myrb_translate_devstate(void *state)
3211 {
3212 	memcpy(state + 2, state + 3, 1);
3213 	memmove(state + 4, state + 5, 2);
3214 	memmove(state + 6, state + 8, 4);
3215 }
3216 
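/*
 * The old-style read/write mailbox keeps the logical drive number in
 * byte 7, with byte 3 holding two extra bits (taken from byte 7 of the
 * new format) in its top bits, whereas the new format carries the
 * drive number in the upper bits of byte 3 (cf. the ldev_num << 3 in
 * myrb_translate_from_rw_command()). The helpers below shuffle these
 * fields in place before submission and back on completion; this
 * layout description is inferred from the translation code itself.
 */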
3217 static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3218 {
3219 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3220 	int ldev_num = mbox->type5.ld.ldev_num;
3221 
3222 	mbox->bytes[3] &= 0x7;
3223 	mbox->bytes[3] |= mbox->bytes[7] << 6;
3224 	mbox->bytes[7] = ldev_num;
3225 }
3226 
3227 static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3228 {
3229 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3230 	int ldev_num = mbox->bytes[7];
3231 
3232 	mbox->bytes[7] = mbox->bytes[3] >> 6;
3233 	mbox->bytes[3] &= 0x7;
3234 	mbox->bytes[3] |= ldev_num << 3;
3235 }
3236 
3237 static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3238 {
3239 	void __iomem *base = cb->io_base;
3240 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3241 
3242 	switch (mbox->common.opcode) {
3243 	case MYRB_CMD_ENQUIRY:
3244 		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
3245 		break;
3246 	case MYRB_CMD_GET_DEVICE_STATE:
3247 		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
3248 		break;
3249 	case MYRB_CMD_READ:
3250 		mbox->common.opcode = MYRB_CMD_READ_OLD;
3251 		myrb_translate_to_rw_command(cmd_blk);
3252 		break;
3253 	case MYRB_CMD_WRITE:
3254 		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
3255 		myrb_translate_to_rw_command(cmd_blk);
3256 		break;
3257 	case MYRB_CMD_READ_SG:
3258 		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
3259 		myrb_translate_to_rw_command(cmd_blk);
3260 		break;
3261 	case MYRB_CMD_WRITE_SG:
3262 		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
3263 		myrb_translate_to_rw_command(cmd_blk);
3264 		break;
3265 	default:
3266 		break;
3267 	}
3268 	while (DAC960_PD_hw_mbox_is_full(base))
3269 		udelay(1);
3270 	DAC960_PD_write_cmd_mbox(base, mbox);
3271 	DAC960_PD_hw_mbox_new_cmd(base);
3272 }
3273 
3275 static int DAC960_P_hw_init(struct pci_dev *pdev,
3276 		struct myrb_hba *cb, void __iomem *base)
3277 {
3278 	int timeout = 0;
3279 	unsigned char error, parm0, parm1;
3280 
3281 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3282 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3283 			(unsigned long)cb->io_addr);
3284 		return -EBUSY;
3285 	}
3286 	DAC960_PD_disable_intr(base);
3287 	DAC960_PD_ack_hw_mbox_status(base);
3288 	udelay(1000);
3289 	while (DAC960_PD_init_in_progress(base) &&
3290 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3291 		if (DAC960_PD_read_error_status(base, &error,
3292 						&parm0, &parm1) &&
3293 		    myrb_err_status(cb, error, parm0, parm1))
3294 			return -EAGAIN;
3295 		udelay(10);
3296 		timeout++;
3297 	}
3298 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3299 		dev_err(&pdev->dev,
3300 			"Timeout waiting for Controller Initialisation\n");
3301 		return -ETIMEDOUT;
3302 	}
3303 	if (!myrb_enable_mmio(cb, NULL)) {
3304 		dev_err(&pdev->dev,
3305 			"Unable to allocate DMA mapped memory\n");
3306 		DAC960_PD_reset_ctrl(base);
3307 		return -ETIMEDOUT;
3308 	}
3309 	DAC960_PD_enable_intr(base);
3310 	cb->qcmd = DAC960_P_qcmd;
3311 	cb->disable_intr = DAC960_PD_disable_intr;
3312 	cb->reset = DAC960_PD_reset_ctrl;
3313 
3314 	return 0;
3315 }
3316 
3317 static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
3318 {
3319 	struct myrb_hba *cb = arg;
3320 	void __iomem *base = cb->io_base;
3321 	unsigned long flags;
3322 
3323 	spin_lock_irqsave(&cb->queue_lock, flags);
3324 	while (DAC960_PD_hw_mbox_status_available(base)) {
3325 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3326 		struct scsi_cmnd *scmd = NULL;
3327 		struct myrb_cmdblk *cmd_blk = NULL;
3328 		union myrb_cmd_mbox *mbox;
3329 		enum myrb_cmd_opcode op;
3330 
3332 		if (id == MYRB_DCMD_TAG)
3333 			cmd_blk = &cb->dcmd_blk;
3334 		else if (id == MYRB_MCMD_TAG)
3335 			cmd_blk = &cb->mcmd_blk;
3336 		else {
3337 			scmd = scsi_host_find_tag(cb->host, id - 3);
3338 			if (scmd)
3339 				cmd_blk = scsi_cmd_priv(scmd);
3340 		}
3341 		if (cmd_blk)
3342 			cmd_blk->status = DAC960_PD_read_status(base);
3343 		else
3344 			dev_err(&cb->pdev->dev,
3345 				"Unhandled command completion %d\n", id);
3346 
3347 		DAC960_PD_ack_intr(base);
3348 		DAC960_PD_ack_hw_mbox_status(base);
3349 
3350 		if (!cmd_blk)
3351 			continue;
3352 
3353 		mbox = &cmd_blk->mbox;
3354 		op = mbox->common.opcode;
3355 		switch (op) {
3356 		case MYRB_CMD_ENQUIRY_OLD:
3357 			mbox->common.opcode = MYRB_CMD_ENQUIRY;
3358 			myrb_translate_enquiry(cb->enquiry);
3359 			break;
3360 		case MYRB_CMD_READ_OLD:
3361 			mbox->common.opcode = MYRB_CMD_READ;
3362 			myrb_translate_from_rw_command(cmd_blk);
3363 			break;
3364 		case MYRB_CMD_WRITE_OLD:
3365 			mbox->common.opcode = MYRB_CMD_WRITE;
3366 			myrb_translate_from_rw_command(cmd_blk);
3367 			break;
3368 		case MYRB_CMD_READ_SG_OLD:
3369 			mbox->common.opcode = MYRB_CMD_READ_SG;
3370 			myrb_translate_from_rw_command(cmd_blk);
3371 			break;
3372 		case MYRB_CMD_WRITE_SG_OLD:
3373 			mbox->common.opcode = MYRB_CMD_WRITE_SG;
3374 			myrb_translate_from_rw_command(cmd_blk);
3375 			break;
3376 		default:
3377 			break;
3378 		}
3379 		if (id < 3)
3380 			myrb_handle_cmdblk(cb, cmd_blk);
3381 		else
3382 			myrb_handle_scsi(cb, cmd_blk, scmd);
3383 	}
3384 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3385 	return IRQ_HANDLED;
3386 }
3387 
static struct myrb_privdata DAC960_P_privdata = {
	.hw_init =	DAC960_P_hw_init,
	.irq_handler =	DAC960_P_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};

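/*
 * myrb_detect - allocates and initializes a controller instance
 *
 * Return: pointer to the new myrb_hba structure on success, NULL otherwise.
 */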
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;
	cb->host = shost;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		scsi_host_put(shost);
		return NULL;
	}

	/* PD and P series controllers have separate I/O port and PCI BARs */
	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else {
		cb->pci_addr = pci_resource_start(pdev, 0);
	}

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrb_cleanup(cb);
	return NULL;
}

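/*
 * myrb_probe - PCI probe callback; brings up a controller and registers
 * it with the SCSI midlayer.
 *
 * Return: 0 on success, negative errno otherwise.
 */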
static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
{
	struct myrb_hba *cb;
	int ret;

	cb = myrb_detect(dev, entry);
	if (!cb)
		return -ENODEV;

	ret = myrb_get_hba_config(cb);
	if (ret < 0) {
		myrb_cleanup(cb);
		return ret;
	}

	if (!myrb_create_mempools(dev, cb)) {
		ret = -ENOMEM;
		goto failed;
	}

	ret = scsi_add_host(cb->host, &dev->dev);
	if (ret) {
		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
		myrb_destroy_mempools(cb);
		goto failed;
	}
	scsi_scan_host(cb->host);
	return 0;
failed:
	myrb_cleanup(cb);
	return ret;
}

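/*
 * myrb_remove - PCI remove callback; flushes the controller cache
 * before tearing the controller down.
 */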
static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}

static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data	= (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrb_id_table);

static struct pci_driver myrb_pci_driver = {
	.name		= "myrb",
	.id_table	= myrb_id_table,
	.probe		= myrb_probe,
	.remove		= myrb_remove,
};

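/*
 * myrb_init_module - attaches the RAID class template and registers
 * the PCI driver.
 */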
static int __init myrb_init_module(void)
{
	int ret;

	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
	if (!myrb_raid_template)
		return -ENODEV;

	ret = pci_register_driver(&myrb_pci_driver);
	if (ret)
		raid_class_release(myrb_raid_template);

	return ret;
}

static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}

module_init(myrb_init_module);
module_exit(myrb_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");
3561