xref: /linux/drivers/scsi/isci/host.c (revision f1f52e75939b56c40b3d153ae99faf2720250242)
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55 #include <linux/device.h>
56 #include <scsi/sas.h>
57 #include "host.h"
58 #include "isci.h"
59 #include "port.h"
60 #include "host.h"
61 #include "probe_roms.h"
62 #include "remote_device.h"
63 #include "request.h"
64 #include "scic_sds_port_configuration_agent.h"
65 #include "scu_completion_codes.h"
66 #include "scu_event_codes.h"
67 #include "registers.h"
68 #include "scu_remote_node_context.h"
69 #include "scu_task_context.h"
70 #include "scu_unsolicited_frame.h"
71 #include "timers.h"
72 
73 #define SCU_CONTEXT_RAM_INIT_STALL_TIME      200
74 
75 /**
76  * smu_dcc_get_max_ports() -
77  *
78  * This macro returns the maximum number of logical ports supported by the
79  * hardware. The caller passes in the value read from the device context
80  * capacity register and this macro will mash and shift the value appropriately.
81  */
82 #define smu_dcc_get_max_ports(dcc_value) \
83 	(\
84 		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
85 		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
86 	)
87 
88 /**
89  * smu_dcc_get_max_task_context() -
90  *
91  * This macro returns the maximum number of task contexts supported by the
92  * hardware. The caller passes in the value read from the device context
93  * capacity register and this macro will mash and shift the value appropriately.
94  */
95 #define smu_dcc_get_max_task_context(dcc_value)	\
96 	(\
97 		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
98 		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
99 	)
100 
101 /**
102  * smu_dcc_get_max_remote_node_context() -
103  *
104  * This macro returns the maximum number of remote node contexts supported by
105  * the hardware. The caller passes in the value read from the device context
106  * capacity register and this macro will mash and shift the value appropriately.
107  */
108 #define smu_dcc_get_max_remote_node_context(dcc_value) \
109 	(\
110 		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
111 		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
112 	)
113 
114 
115 #define SCIC_SDS_CONTROLLER_MIN_TIMER_COUNT  3
116 #define SCIC_SDS_CONTROLLER_MAX_TIMER_COUNT  3
117 
118 /**
119  *
120  *
121  * The number of milliseconds to wait for a phy to start.
122  */
123 #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100
124 
125 /**
126  *
127  *
128  * The number of milliseconds to wait while a given phy is consuming power
129  * before allowing another set of phys to consume power. Ultimately, this will
130  * be specified by OEM parameter.
131  */
132 #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
133 
134 /**
135  * NORMALIZE_PUT_POINTER() -
136  *
137  * This macro will normalize the completion queue put pointer so its value can
138  * be used as an array inde
139  */
140 #define NORMALIZE_PUT_POINTER(x) \
141 	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
142 
143 
144 /**
145  * NORMALIZE_EVENT_POINTER() -
146  *
147  * This macro will normalize the completion queue event entry so its value can
148  * be used as an index.
149  */
150 #define NORMALIZE_EVENT_POINTER(x) \
151 	(\
152 		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
153 		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT	\
154 	)
155 
156 /**
157  * INCREMENT_COMPLETION_QUEUE_GET() -
158  *
159  * This macro will increment the controllers completion queue index value and
160  * possibly toggle the cycle bit if the completion queue index wraps back to 0.
161  */
162 #define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \
163 	INCREMENT_QUEUE_GET(\
164 		(index), \
165 		(cycle), \
166 		(controller)->completion_queue_entries,	\
167 		SMU_CQGR_CYCLE_BIT \
168 		)
169 
170 /**
171  * INCREMENT_EVENT_QUEUE_GET() -
172  *
173  * This macro will increment the controllers event queue index value and
174  * possibly toggle the event cycle bit if the event queue index wraps back to 0.
175  */
176 #define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \
177 	INCREMENT_QUEUE_GET(\
178 		(index), \
179 		(cycle), \
180 		(controller)->completion_event_entries,	\
181 		SMU_CQGR_EVENT_CYCLE_BIT \
182 		)
183 
184 
185 /**
186  * NORMALIZE_GET_POINTER() -
187  *
188  * This macro will normalize the completion queue get pointer so its value can
189  * be used as an index into an array
190  */
191 #define NORMALIZE_GET_POINTER(x) \
192 	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
193 
194 /**
195  * NORMALIZE_GET_POINTER_CYCLE_BIT() -
196  *
197  * This macro will normalize the completion queue cycle pointer so it matches
198  * the completion queue cycle bit
199  */
200 #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
201 	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
202 
203 /**
204  * COMPLETION_QUEUE_CYCLE_BIT() -
205  *
206  * This macro will return the cycle bit of the completion queue entry
207  */
208 #define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
209 
210 static bool scic_sds_controller_completion_queue_has_entries(
211 	struct scic_sds_controller *scic)
212 {
213 	u32 get_value = scic->completion_queue_get;
214 	u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
215 
216 	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
217 	    COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index]))
218 		return true;
219 
220 	return false;
221 }
222 
223 static bool scic_sds_controller_isr(struct scic_sds_controller *scic)
224 {
225 	if (scic_sds_controller_completion_queue_has_entries(scic)) {
226 		return true;
227 	} else {
228 		/*
229 		 * we have a spurious interrupt it could be that we have already
230 		 * emptied the completion queue from a previous interrupt */
231 		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
232 
233 		/*
234 		 * There is a race in the hardware that could cause us not to be notified
235 		 * of an interrupt completion if we do not take this step.  We will mask
236 		 * then unmask the interrupts so if there is another interrupt pending
237 		 * the clearing of the interrupt source we get the next interrupt message. */
238 		writel(0xFF000000, &scic->smu_registers->interrupt_mask);
239 		writel(0, &scic->smu_registers->interrupt_mask);
240 	}
241 
242 	return false;
243 }
244 
245 irqreturn_t isci_msix_isr(int vec, void *data)
246 {
247 	struct isci_host *ihost = data;
248 
249 	if (scic_sds_controller_isr(&ihost->sci))
250 		tasklet_schedule(&ihost->completion_tasklet);
251 
252 	return IRQ_HANDLED;
253 }
254 
255 static bool scic_sds_controller_error_isr(struct scic_sds_controller *scic)
256 {
257 	u32 interrupt_status;
258 
259 	interrupt_status =
260 		readl(&scic->smu_registers->interrupt_status);
261 	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
262 
263 	if (interrupt_status != 0) {
264 		/*
265 		 * There is an error interrupt pending so let it through and handle
266 		 * in the callback */
267 		return true;
268 	}
269 
270 	/*
271 	 * There is a race in the hardware that could cause us not to be notified
272 	 * of an interrupt completion if we do not take this step.  We will mask
273 	 * then unmask the error interrupts so if there was another interrupt
274 	 * pending we will be notified.
275 	 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
276 	writel(0xff, &scic->smu_registers->interrupt_mask);
277 	writel(0, &scic->smu_registers->interrupt_mask);
278 
279 	return false;
280 }
281 
282 static void scic_sds_controller_task_completion(struct scic_sds_controller *scic,
283 						u32 completion_entry)
284 {
285 	u32 index;
286 	struct scic_sds_request *io_request;
287 
288 	index = SCU_GET_COMPLETION_INDEX(completion_entry);
289 	io_request = scic->io_request_table[index];
290 
291 	/* Make sure that we really want to process this IO request */
292 	if (
293 		(io_request != NULL)
294 		&& (io_request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG)
295 		&& (
296 			scic_sds_io_tag_get_sequence(io_request->io_tag)
297 			== scic->io_request_sequence[index]
298 			)
299 		) {
300 		/* Yep this is a valid io request pass it along to the io request handler */
301 		scic_sds_io_request_tc_completion(io_request, completion_entry);
302 	}
303 }
304 
305 static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic,
306 						u32 completion_entry)
307 {
308 	u32 index;
309 	struct scic_sds_request *io_request;
310 	struct scic_sds_remote_device *device;
311 
312 	index = SCU_GET_COMPLETION_INDEX(completion_entry);
313 
314 	switch (scu_get_command_request_type(completion_entry)) {
315 	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
316 	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
317 		io_request = scic->io_request_table[index];
318 		dev_warn(scic_to_dev(scic),
319 			 "%s: SCIC SDS Completion type SDMA %x for io request "
320 			 "%p\n",
321 			 __func__,
322 			 completion_entry,
323 			 io_request);
324 		/* @todo For a post TC operation we need to fail the IO
325 		 * request
326 		 */
327 		break;
328 
329 	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
330 	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
331 	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
332 		device = scic->device_table[index];
333 		dev_warn(scic_to_dev(scic),
334 			 "%s: SCIC SDS Completion type SDMA %x for remote "
335 			 "device %p\n",
336 			 __func__,
337 			 completion_entry,
338 			 device);
339 		/* @todo For a port RNC operation we need to fail the
340 		 * device
341 		 */
342 		break;
343 
344 	default:
345 		dev_warn(scic_to_dev(scic),
346 			 "%s: SCIC SDS Completion unknown SDMA completion "
347 			 "type %x\n",
348 			 __func__,
349 			 completion_entry);
350 		break;
351 
352 	}
353 }
354 
355 static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *scic,
356 						  u32 completion_entry)
357 {
358 	u32 index;
359 	u32 frame_index;
360 
361 	struct isci_host *ihost = scic_to_ihost(scic);
362 	struct scu_unsolicited_frame_header *frame_header;
363 	struct scic_sds_phy *phy;
364 	struct scic_sds_remote_device *device;
365 
366 	enum sci_status result = SCI_FAILURE;
367 
368 	frame_index = SCU_GET_FRAME_INDEX(completion_entry);
369 
370 	frame_header = scic->uf_control.buffers.array[frame_index].header;
371 	scic->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
372 
373 	if (SCU_GET_FRAME_ERROR(completion_entry)) {
374 		/*
375 		 * / @todo If the IAF frame or SIGNATURE FIS frame has an error will
376 		 * /       this cause a problem? We expect the phy initialization will
377 		 * /       fail if there is an error in the frame. */
378 		scic_sds_controller_release_frame(scic, frame_index);
379 		return;
380 	}
381 
382 	if (frame_header->is_address_frame) {
383 		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
384 		phy = &ihost->phys[index].sci;
385 		result = scic_sds_phy_frame_handler(phy, frame_index);
386 	} else {
387 
388 		index = SCU_GET_COMPLETION_INDEX(completion_entry);
389 
390 		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
391 			/*
392 			 * This is a signature fis or a frame from a direct attached SATA
393 			 * device that has not yet been created.  In either case forwared
394 			 * the frame to the PE and let it take care of the frame data. */
395 			index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
396 			phy = &ihost->phys[index].sci;
397 			result = scic_sds_phy_frame_handler(phy, frame_index);
398 		} else {
399 			if (index < scic->remote_node_entries)
400 				device = scic->device_table[index];
401 			else
402 				device = NULL;
403 
404 			if (device != NULL)
405 				result = scic_sds_remote_device_frame_handler(device, frame_index);
406 			else
407 				scic_sds_controller_release_frame(scic, frame_index);
408 		}
409 	}
410 
411 	if (result != SCI_SUCCESS) {
412 		/*
413 		 * / @todo Is there any reason to report some additional error message
414 		 * /       when we get this failure notifiction? */
415 	}
416 }
417 
418 static void scic_sds_controller_event_completion(struct scic_sds_controller *scic,
419 						 u32 completion_entry)
420 {
421 	struct isci_host *ihost = scic_to_ihost(scic);
422 	struct scic_sds_request *io_request;
423 	struct scic_sds_remote_device *device;
424 	struct scic_sds_phy *phy;
425 	u32 index;
426 
427 	index = SCU_GET_COMPLETION_INDEX(completion_entry);
428 
429 	switch (scu_get_event_type(completion_entry)) {
430 	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
431 		/* / @todo The driver did something wrong and we need to fix the condtion. */
432 		dev_err(scic_to_dev(scic),
433 			"%s: SCIC Controller 0x%p received SMU command error "
434 			"0x%x\n",
435 			__func__,
436 			scic,
437 			completion_entry);
438 		break;
439 
440 	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
441 	case SCU_EVENT_TYPE_SMU_ERROR:
442 	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
443 		/*
444 		 * / @todo This is a hardware failure and its likely that we want to
445 		 * /       reset the controller. */
446 		dev_err(scic_to_dev(scic),
447 			"%s: SCIC Controller 0x%p received fatal controller "
448 			"event  0x%x\n",
449 			__func__,
450 			scic,
451 			completion_entry);
452 		break;
453 
454 	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
455 		io_request = scic->io_request_table[index];
456 		scic_sds_io_request_event_handler(io_request, completion_entry);
457 		break;
458 
459 	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
460 		switch (scu_get_event_specifier(completion_entry)) {
461 		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
462 		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
463 			io_request = scic->io_request_table[index];
464 			if (io_request != NULL)
465 				scic_sds_io_request_event_handler(io_request, completion_entry);
466 			else
467 				dev_warn(scic_to_dev(scic),
468 					 "%s: SCIC Controller 0x%p received "
469 					 "event 0x%x for io request object "
470 					 "that doesnt exist.\n",
471 					 __func__,
472 					 scic,
473 					 completion_entry);
474 
475 			break;
476 
477 		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
478 			device = scic->device_table[index];
479 			if (device != NULL)
480 				scic_sds_remote_device_event_handler(device, completion_entry);
481 			else
482 				dev_warn(scic_to_dev(scic),
483 					 "%s: SCIC Controller 0x%p received "
484 					 "event 0x%x for remote device object "
485 					 "that doesnt exist.\n",
486 					 __func__,
487 					 scic,
488 					 completion_entry);
489 
490 			break;
491 		}
492 		break;
493 
494 	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
495 	/*
496 	 * direct the broadcast change event to the phy first and then let
497 	 * the phy redirect the broadcast change to the port object */
498 	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
499 	/*
500 	 * direct error counter event to the phy object since that is where
501 	 * we get the event notification.  This is a type 4 event. */
502 	case SCU_EVENT_TYPE_OSSP_EVENT:
503 		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
504 		phy = &ihost->phys[index].sci;
505 		scic_sds_phy_event_handler(phy, completion_entry);
506 		break;
507 
508 	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
509 	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
510 	case SCU_EVENT_TYPE_RNC_OPS_MISC:
511 		if (index < scic->remote_node_entries) {
512 			device = scic->device_table[index];
513 
514 			if (device != NULL)
515 				scic_sds_remote_device_event_handler(device, completion_entry);
516 		} else
517 			dev_err(scic_to_dev(scic),
518 				"%s: SCIC Controller 0x%p received event 0x%x "
519 				"for remote device object 0x%0x that doesnt "
520 				"exist.\n",
521 				__func__,
522 				scic,
523 				completion_entry,
524 				index);
525 
526 		break;
527 
528 	default:
529 		dev_warn(scic_to_dev(scic),
530 			 "%s: SCIC Controller received unknown event code %x\n",
531 			 __func__,
532 			 completion_entry);
533 		break;
534 	}
535 }
536 
537 
538 
539 static void scic_sds_controller_process_completions(struct scic_sds_controller *scic)
540 {
541 	u32 completion_count = 0;
542 	u32 completion_entry;
543 	u32 get_index;
544 	u32 get_cycle;
545 	u32 event_index;
546 	u32 event_cycle;
547 
548 	dev_dbg(scic_to_dev(scic),
549 		"%s: completion queue begining get:0x%08x\n",
550 		__func__,
551 		scic->completion_queue_get);
552 
553 	/* Get the component parts of the completion queue */
554 	get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get);
555 	get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get;
556 
557 	event_index = NORMALIZE_EVENT_POINTER(scic->completion_queue_get);
558 	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get;
559 
560 	while (
561 		NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
562 		== COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index])
563 		) {
564 		completion_count++;
565 
566 		completion_entry = scic->completion_queue[get_index];
567 		INCREMENT_COMPLETION_QUEUE_GET(scic, get_index, get_cycle);
568 
569 		dev_dbg(scic_to_dev(scic),
570 			"%s: completion queue entry:0x%08x\n",
571 			__func__,
572 			completion_entry);
573 
574 		switch (SCU_GET_COMPLETION_TYPE(completion_entry)) {
575 		case SCU_COMPLETION_TYPE_TASK:
576 			scic_sds_controller_task_completion(scic, completion_entry);
577 			break;
578 
579 		case SCU_COMPLETION_TYPE_SDMA:
580 			scic_sds_controller_sdma_completion(scic, completion_entry);
581 			break;
582 
583 		case SCU_COMPLETION_TYPE_UFI:
584 			scic_sds_controller_unsolicited_frame(scic, completion_entry);
585 			break;
586 
587 		case SCU_COMPLETION_TYPE_EVENT:
588 			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
589 			scic_sds_controller_event_completion(scic, completion_entry);
590 			break;
591 
592 		case SCU_COMPLETION_TYPE_NOTIFY:
593 			/*
594 			 * Presently we do the same thing with a notify event that we do with the
595 			 * other event codes. */
596 			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
597 			scic_sds_controller_event_completion(scic, completion_entry);
598 			break;
599 
600 		default:
601 			dev_warn(scic_to_dev(scic),
602 				 "%s: SCIC Controller received unknown "
603 				 "completion type %x\n",
604 				 __func__,
605 				 completion_entry);
606 			break;
607 		}
608 	}
609 
610 	/* Update the get register if we completed one or more entries */
611 	if (completion_count > 0) {
612 		scic->completion_queue_get =
613 			SMU_CQGR_GEN_BIT(ENABLE) |
614 			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
615 			event_cycle |
616 			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index) |
617 			get_cycle |
618 			SMU_CQGR_GEN_VAL(POINTER, get_index);
619 
620 		writel(scic->completion_queue_get,
621 		       &scic->smu_registers->completion_queue_get);
622 
623 	}
624 
625 	dev_dbg(scic_to_dev(scic),
626 		"%s: completion queue ending get:0x%08x\n",
627 		__func__,
628 		scic->completion_queue_get);
629 
630 }
631 
632 static void scic_sds_controller_error_handler(struct scic_sds_controller *scic)
633 {
634 	u32 interrupt_status;
635 
636 	interrupt_status =
637 		readl(&scic->smu_registers->interrupt_status);
638 
639 	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
640 	    scic_sds_controller_completion_queue_has_entries(scic)) {
641 
642 		scic_sds_controller_process_completions(scic);
643 		writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status);
644 	} else {
645 		dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__,
646 			interrupt_status);
647 
648 		sci_base_state_machine_change_state(&scic->state_machine,
649 						    SCI_BASE_CONTROLLER_STATE_FAILED);
650 
651 		return;
652 	}
653 
654 	/* If we dont process any completions I am not sure that we want to do this.
655 	 * We are in the middle of a hardware fault and should probably be reset.
656 	 */
657 	writel(0, &scic->smu_registers->interrupt_mask);
658 }
659 
660 irqreturn_t isci_intx_isr(int vec, void *data)
661 {
662 	irqreturn_t ret = IRQ_NONE;
663 	struct isci_host *ihost = data;
664 	struct scic_sds_controller *scic = &ihost->sci;
665 
666 	if (scic_sds_controller_isr(scic)) {
667 		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
668 		tasklet_schedule(&ihost->completion_tasklet);
669 		ret = IRQ_HANDLED;
670 	} else if (scic_sds_controller_error_isr(scic)) {
671 		spin_lock(&ihost->scic_lock);
672 		scic_sds_controller_error_handler(scic);
673 		spin_unlock(&ihost->scic_lock);
674 		ret = IRQ_HANDLED;
675 	}
676 
677 	return ret;
678 }
679 
680 irqreturn_t isci_error_isr(int vec, void *data)
681 {
682 	struct isci_host *ihost = data;
683 
684 	if (scic_sds_controller_error_isr(&ihost->sci))
685 		scic_sds_controller_error_handler(&ihost->sci);
686 
687 	return IRQ_HANDLED;
688 }
689 
690 /**
691  * isci_host_start_complete() - This function is called by the core library,
692  *    through the ISCI Module, to indicate controller start status.
693  * @isci_host: This parameter specifies the ISCI host object
694  * @completion_status: This parameter specifies the completion status from the
695  *    core library.
696  *
697  */
698 static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
699 {
700 	if (completion_status != SCI_SUCCESS)
701 		dev_info(&ihost->pdev->dev,
702 			"controller start timed out, continuing...\n");
703 	isci_host_change_state(ihost, isci_ready);
704 	clear_bit(IHOST_START_PENDING, &ihost->flags);
705 	wake_up(&ihost->eventq);
706 }
707 
708 int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
709 {
710 	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
711 
712 	if (test_bit(IHOST_START_PENDING, &ihost->flags))
713 		return 0;
714 
715 	/* todo: use sas_flush_discovery once it is upstream */
716 	scsi_flush_work(shost);
717 
718 	scsi_flush_work(shost);
719 
720 	dev_dbg(&ihost->pdev->dev,
721 		"%s: ihost->status = %d, time = %ld\n",
722 		 __func__, isci_host_get_state(ihost), time);
723 
724 	return 1;
725 
726 }
727 
728 /**
729  * scic_controller_get_suggested_start_timeout() - This method returns the
730  *    suggested scic_controller_start() timeout amount.  The user is free to
731  *    use any timeout value, but this method provides the suggested minimum
732  *    start timeout value.  The returned value is based upon empirical
733  *    information determined as a result of interoperability testing.
734  * @controller: the handle to the controller object for which to return the
735  *    suggested start timeout.
736  *
737  * This method returns the number of milliseconds for the suggested start
738  * operation timeout.
739  */
740 static u32 scic_controller_get_suggested_start_timeout(
741 	struct scic_sds_controller *sc)
742 {
743 	/* Validate the user supplied parameters. */
744 	if (sc == NULL)
745 		return 0;
746 
747 	/*
748 	 * The suggested minimum timeout value for a controller start operation:
749 	 *
750 	 *     Signature FIS Timeout
751 	 *   + Phy Start Timeout
752 	 *   + Number of Phy Spin Up Intervals
753 	 *   ---------------------------------
754 	 *   Number of milliseconds for the controller start operation.
755 	 *
756 	 * NOTE: The number of phy spin up intervals will be equivalent
757 	 *       to the number of phys divided by the number phys allowed
758 	 *       per interval - 1 (once OEM parameters are supported).
759 	 *       Currently we assume only 1 phy per interval. */
760 
761 	return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
762 		+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
763 		+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
764 }
765 
766 static void scic_controller_enable_interrupts(
767 	struct scic_sds_controller *scic)
768 {
769 	BUG_ON(scic->smu_registers == NULL);
770 	writel(0, &scic->smu_registers->interrupt_mask);
771 }
772 
773 void scic_controller_disable_interrupts(
774 	struct scic_sds_controller *scic)
775 {
776 	BUG_ON(scic->smu_registers == NULL);
777 	writel(0xffffffff, &scic->smu_registers->interrupt_mask);
778 }
779 
780 static void scic_sds_controller_enable_port_task_scheduler(
781 	struct scic_sds_controller *scic)
782 {
783 	u32 port_task_scheduler_value;
784 
785 	port_task_scheduler_value =
786 		readl(&scic->scu_registers->peg0.ptsg.control);
787 	port_task_scheduler_value |=
788 		(SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
789 		 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
790 	writel(port_task_scheduler_value,
791 	       &scic->scu_registers->peg0.ptsg.control);
792 }
793 
794 static void scic_sds_controller_assign_task_entries(struct scic_sds_controller *scic)
795 {
796 	u32 task_assignment;
797 
798 	/*
799 	 * Assign all the TCs to function 0
800 	 * TODO: Do we actually need to read this register to write it back?
801 	 */
802 
803 	task_assignment =
804 		readl(&scic->smu_registers->task_context_assignment[0]);
805 
806 	task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
807 		(SMU_TCA_GEN_VAL(ENDING,  scic->task_context_entries - 1)) |
808 		(SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
809 
810 	writel(task_assignment,
811 		&scic->smu_registers->task_context_assignment[0]);
812 
813 }
814 
815 static void scic_sds_controller_initialize_completion_queue(struct scic_sds_controller *scic)
816 {
817 	u32 index;
818 	u32 completion_queue_control_value;
819 	u32 completion_queue_get_value;
820 	u32 completion_queue_put_value;
821 
822 	scic->completion_queue_get = 0;
823 
824 	completion_queue_control_value = (
825 		SMU_CQC_QUEUE_LIMIT_SET(scic->completion_queue_entries - 1)
826 		| SMU_CQC_EVENT_LIMIT_SET(scic->completion_event_entries - 1)
827 		);
828 
829 	writel(completion_queue_control_value,
830 	       &scic->smu_registers->completion_queue_control);
831 
832 
833 	/* Set the completion queue get pointer and enable the queue */
834 	completion_queue_get_value = (
835 		(SMU_CQGR_GEN_VAL(POINTER, 0))
836 		| (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
837 		| (SMU_CQGR_GEN_BIT(ENABLE))
838 		| (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
839 		);
840 
841 	writel(completion_queue_get_value,
842 	       &scic->smu_registers->completion_queue_get);
843 
844 	/* Set the completion queue put pointer */
845 	completion_queue_put_value = (
846 		(SMU_CQPR_GEN_VAL(POINTER, 0))
847 		| (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
848 		);
849 
850 	writel(completion_queue_put_value,
851 	       &scic->smu_registers->completion_queue_put);
852 
853 	/* Initialize the cycle bit of the completion queue entries */
854 	for (index = 0; index < scic->completion_queue_entries; index++) {
855 		/*
856 		 * If get.cycle_bit != completion_queue.cycle_bit
857 		 * its not a valid completion queue entry
858 		 * so at system start all entries are invalid */
859 		scic->completion_queue[index] = 0x80000000;
860 	}
861 }
862 
863 static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_sds_controller *scic)
864 {
865 	u32 frame_queue_control_value;
866 	u32 frame_queue_get_value;
867 	u32 frame_queue_put_value;
868 
869 	/* Write the queue size */
870 	frame_queue_control_value =
871 		SCU_UFQC_GEN_VAL(QUEUE_SIZE,
872 				 scic->uf_control.address_table.count);
873 
874 	writel(frame_queue_control_value,
875 	       &scic->scu_registers->sdma.unsolicited_frame_queue_control);
876 
877 	/* Setup the get pointer for the unsolicited frame queue */
878 	frame_queue_get_value = (
879 		SCU_UFQGP_GEN_VAL(POINTER, 0)
880 		|  SCU_UFQGP_GEN_BIT(ENABLE_BIT)
881 		);
882 
883 	writel(frame_queue_get_value,
884 	       &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
885 	/* Setup the put pointer for the unsolicited frame queue */
886 	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
887 	writel(frame_queue_put_value,
888 	       &scic->scu_registers->sdma.unsolicited_frame_put_pointer);
889 }
890 
891 /**
892  * This method will attempt to transition into the ready state for the
893  *    controller and indicate that the controller start operation has completed
894  *    if all criteria are met.
895  * @scic: This parameter indicates the controller object for which
896  *    to transition to ready.
897  * @status: This parameter indicates the status value to be pass into the call
898  *    to scic_cb_controller_start_complete().
899  *
900  * none.
901  */
902 static void scic_sds_controller_transition_to_ready(
903 	struct scic_sds_controller *scic,
904 	enum sci_status status)
905 {
906 	struct isci_host *ihost = scic_to_ihost(scic);
907 
908 	if (scic->state_machine.current_state_id ==
909 	    SCI_BASE_CONTROLLER_STATE_STARTING) {
910 		/*
911 		 * We move into the ready state, because some of the phys/ports
912 		 * may be up and operational.
913 		 */
914 		sci_base_state_machine_change_state(&scic->state_machine,
915 						    SCI_BASE_CONTROLLER_STATE_READY);
916 
917 		isci_host_start_complete(ihost, status);
918 	}
919 }
920 
921 static void scic_sds_controller_phy_timer_stop(struct scic_sds_controller *scic)
922 {
923 	isci_timer_stop(scic->phy_startup_timer);
924 
925 	scic->phy_startup_timer_pending = false;
926 }
927 
928 static void scic_sds_controller_phy_timer_start(struct scic_sds_controller *scic)
929 {
930 	isci_timer_start(scic->phy_startup_timer,
931 			 SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
932 
933 	scic->phy_startup_timer_pending = true;
934 }
935 
936 /**
937  * scic_sds_controller_start_next_phy - start phy
938  * @scic: controller
939  *
940  * If all the phys have been started, then attempt to transition the
941  * controller to the READY state and inform the user
942  * (scic_cb_controller_start_complete()).
943  */
944 static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic)
945 {
946 	struct isci_host *ihost = scic_to_ihost(scic);
947 	struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
948 	struct scic_sds_phy *sci_phy;
949 	enum sci_status status;
950 
951 	status = SCI_SUCCESS;
952 
953 	if (scic->phy_startup_timer_pending)
954 		return status;
955 
956 	if (scic->next_phy_to_start >= SCI_MAX_PHYS) {
957 		bool is_controller_start_complete = true;
958 		u32 state;
959 		u8 index;
960 
961 		for (index = 0; index < SCI_MAX_PHYS; index++) {
962 			sci_phy = &ihost->phys[index].sci;
963 			state = sci_phy->state_machine.current_state_id;
964 
965 			if (!scic_sds_phy_get_port(sci_phy))
966 				continue;
967 
968 			/* The controller start operation is complete iff:
969 			 * - all links have been given an opportunity to start
970 			 * - have no indication of a connected device
971 			 * - have an indication of a connected device and it has
972 			 *   finished the link training process.
973 			 */
974 			if ((sci_phy->is_in_link_training == false &&
975 			     state == SCI_BASE_PHY_STATE_INITIAL) ||
976 			    (sci_phy->is_in_link_training == false &&
977 			     state == SCI_BASE_PHY_STATE_STOPPED) ||
978 			    (sci_phy->is_in_link_training == true &&
979 			     state == SCI_BASE_PHY_STATE_STARTING)) {
980 				is_controller_start_complete = false;
981 				break;
982 			}
983 		}
984 
985 		/*
986 		 * The controller has successfully finished the start process.
987 		 * Inform the SCI Core user and transition to the READY state. */
988 		if (is_controller_start_complete == true) {
989 			scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS);
990 			scic_sds_controller_phy_timer_stop(scic);
991 		}
992 	} else {
993 		sci_phy = &ihost->phys[scic->next_phy_to_start].sci;
994 
995 		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
996 			if (scic_sds_phy_get_port(sci_phy) == NULL) {
997 				scic->next_phy_to_start++;
998 
999 				/* Caution recursion ahead be forwarned
1000 				 *
1001 				 * The PHY was never added to a PORT in MPC mode
1002 				 * so start the next phy in sequence This phy
1003 				 * will never go link up and will not draw power
1004 				 * the OEM parameters either configured the phy
1005 				 * incorrectly for the PORT or it was never
1006 				 * assigned to a PORT
1007 				 */
1008 				return scic_sds_controller_start_next_phy(scic);
1009 			}
1010 		}
1011 
1012 		status = scic_sds_phy_start(sci_phy);
1013 
1014 		if (status == SCI_SUCCESS) {
1015 			scic_sds_controller_phy_timer_start(scic);
1016 		} else {
1017 			dev_warn(scic_to_dev(scic),
1018 				 "%s: Controller stop operation failed "
1019 				 "to stop phy %d because of status "
1020 				 "%d.\n",
1021 				 __func__,
1022 				 ihost->phys[scic->next_phy_to_start].sci.phy_index,
1023 				 status);
1024 		}
1025 
1026 		scic->next_phy_to_start++;
1027 	}
1028 
1029 	return status;
1030 }
1031 
1032 static void scic_sds_controller_phy_startup_timeout_handler(void *_scic)
1033 {
1034 	struct scic_sds_controller *scic = _scic;
1035 	enum sci_status status;
1036 
1037 	scic->phy_startup_timer_pending = false;
1038 	status = SCI_FAILURE;
1039 	while (status != SCI_SUCCESS)
1040 		status = scic_sds_controller_start_next_phy(scic);
1041 }
1042 
1043 static enum sci_status scic_controller_start(struct scic_sds_controller *scic,
1044 					     u32 timeout)
1045 {
1046 	struct isci_host *ihost = scic_to_ihost(scic);
1047 	enum sci_status result;
1048 	u16 index;
1049 
1050 	if (scic->state_machine.current_state_id !=
1051 	    SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
1052 		dev_warn(scic_to_dev(scic),
1053 			 "SCIC Controller start operation requested in "
1054 			 "invalid state\n");
1055 		return SCI_FAILURE_INVALID_STATE;
1056 	}
1057 
1058 	/* Build the TCi free pool */
1059 	sci_pool_initialize(scic->tci_pool);
1060 	for (index = 0; index < scic->task_context_entries; index++)
1061 		sci_pool_put(scic->tci_pool, index);
1062 
1063 	/* Build the RNi free pool */
1064 	scic_sds_remote_node_table_initialize(
1065 			&scic->available_remote_nodes,
1066 			scic->remote_node_entries);
1067 
1068 	/*
1069 	 * Before anything else lets make sure we will not be
1070 	 * interrupted by the hardware.
1071 	 */
1072 	scic_controller_disable_interrupts(scic);
1073 
1074 	/* Enable the port task scheduler */
1075 	scic_sds_controller_enable_port_task_scheduler(scic);
1076 
1077 	/* Assign all the task entries to scic physical function */
1078 	scic_sds_controller_assign_task_entries(scic);
1079 
1080 	/* Now initialize the completion queue */
1081 	scic_sds_controller_initialize_completion_queue(scic);
1082 
1083 	/* Initialize the unsolicited frame queue for use */
1084 	scic_sds_controller_initialize_unsolicited_frame_queue(scic);
1085 
1086 	/* Start all of the ports on this controller */
1087 	for (index = 0; index < scic->logical_port_entries; index++) {
1088 		struct scic_sds_port *sci_port = &ihost->ports[index].sci;
1089 
1090 		result = sci_port->state_handlers->start_handler(sci_port);
1091 		if (result)
1092 			return result;
1093 	}
1094 
1095 	scic_sds_controller_start_next_phy(scic);
1096 
1097 	isci_timer_start(scic->timeout_timer, timeout);
1098 
1099 	sci_base_state_machine_change_state(&scic->state_machine,
1100 					    SCI_BASE_CONTROLLER_STATE_STARTING);
1101 
1102 	return SCI_SUCCESS;
1103 }
1104 
1105 void isci_host_scan_start(struct Scsi_Host *shost)
1106 {
1107 	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
1108 	unsigned long tmo = scic_controller_get_suggested_start_timeout(&ihost->sci);
1109 
1110 	set_bit(IHOST_START_PENDING, &ihost->flags);
1111 
1112 	spin_lock_irq(&ihost->scic_lock);
1113 	scic_controller_start(&ihost->sci, tmo);
1114 	scic_controller_enable_interrupts(&ihost->sci);
1115 	spin_unlock_irq(&ihost->scic_lock);
1116 }
1117 
1118 static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
1119 {
1120 	isci_host_change_state(ihost, isci_stopped);
1121 	scic_controller_disable_interrupts(&ihost->sci);
1122 	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
1123 	wake_up(&ihost->eventq);
1124 }
1125 
1126 static void scic_sds_controller_completion_handler(struct scic_sds_controller *scic)
1127 {
1128 	/* Empty out the completion queue */
1129 	if (scic_sds_controller_completion_queue_has_entries(scic))
1130 		scic_sds_controller_process_completions(scic);
1131 
1132 	/* Clear the interrupt and enable all interrupts again */
1133 	writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
1134 	/* Could we write the value of SMU_ISR_COMPLETION? */
1135 	writel(0xFF000000, &scic->smu_registers->interrupt_mask);
1136 	writel(0, &scic->smu_registers->interrupt_mask);
1137 }
1138 
1139 /**
1140  * isci_host_completion_routine() - This function is the delayed service
1141  *    routine that calls the sci core library's completion handler. It's
1142  *    scheduled as a tasklet from the interrupt service routine when interrupts
1143  *    in use, or set as the timeout function in polled mode.
1144  * @data: This parameter specifies the ISCI host object
1145  *
1146  */
1147 static void isci_host_completion_routine(unsigned long data)
1148 {
1149 	struct isci_host *isci_host = (struct isci_host *)data;
1150 	struct list_head    completed_request_list;
1151 	struct list_head    errored_request_list;
1152 	struct list_head    *current_position;
1153 	struct list_head    *next_position;
1154 	struct isci_request *request;
1155 	struct isci_request *next_request;
1156 	struct sas_task     *task;
1157 
1158 	INIT_LIST_HEAD(&completed_request_list);
1159 	INIT_LIST_HEAD(&errored_request_list);
1160 
1161 	spin_lock_irq(&isci_host->scic_lock);
1162 
1163 	scic_sds_controller_completion_handler(&isci_host->sci);
1164 
1165 	/* Take the lists of completed I/Os from the host. */
1166 
1167 	list_splice_init(&isci_host->requests_to_complete,
1168 			 &completed_request_list);
1169 
1170 	/* Take the list of errored I/Os from the host. */
1171 	list_splice_init(&isci_host->requests_to_errorback,
1172 			 &errored_request_list);
1173 
1174 	spin_unlock_irq(&isci_host->scic_lock);
1175 
1176 	/* Process any completions in the lists. */
1177 	list_for_each_safe(current_position, next_position,
1178 			   &completed_request_list) {
1179 
1180 		request = list_entry(current_position, struct isci_request,
1181 				     completed_node);
1182 		task = isci_request_access_task(request);
1183 
1184 		/* Normal notification (task_done) */
1185 		dev_dbg(&isci_host->pdev->dev,
1186 			"%s: Normal - request/task = %p/%p\n",
1187 			__func__,
1188 			request,
1189 			task);
1190 
1191 		/* Return the task to libsas */
1192 		if (task != NULL) {
1193 
1194 			task->lldd_task = NULL;
1195 			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1196 
1197 				/* If the task is already in the abort path,
1198 				* the task_done callback cannot be called.
1199 				*/
1200 				task->task_done(task);
1201 			}
1202 		}
1203 		/* Free the request object. */
1204 		isci_request_free(isci_host, request);
1205 	}
1206 	list_for_each_entry_safe(request, next_request, &errored_request_list,
1207 				 completed_node) {
1208 
1209 		task = isci_request_access_task(request);
1210 
1211 		/* Use sas_task_abort */
1212 		dev_warn(&isci_host->pdev->dev,
1213 			 "%s: Error - request/task = %p/%p\n",
1214 			 __func__,
1215 			 request,
1216 			 task);
1217 
1218 		if (task != NULL) {
1219 
1220 			/* Put the task into the abort path if it's not there
1221 			 * already.
1222 			 */
1223 			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
1224 				sas_task_abort(task);
1225 
1226 		} else {
1227 			/* This is a case where the request has completed with a
1228 			 * status such that it needed further target servicing,
1229 			 * but the sas_task reference has already been removed
1230 			 * from the request.  Since it was errored, it was not
1231 			 * being aborted, so there is nothing to do except free
1232 			 * it.
1233 			 */
1234 
1235 			spin_lock_irq(&isci_host->scic_lock);
1236 			/* Remove the request from the remote device's list
1237 			* of pending requests.
1238 			*/
1239 			list_del_init(&request->dev_node);
1240 			spin_unlock_irq(&isci_host->scic_lock);
1241 
1242 			/* Free the request object. */
1243 			isci_request_free(isci_host, request);
1244 		}
1245 	}
1246 
1247 }
1248 
1249 /**
1250  * scic_controller_stop() - This method will stop an individual controller
1251  *    object.This method will invoke the associated user callback upon
1252  *    completion.  The completion callback is called when the following
1253  *    conditions are met: -# the method return status is SCI_SUCCESS. -# the
1254  *    controller has been quiesced. This method will ensure that all IO
1255  *    requests are quiesced, phys are stopped, and all additional operation by
1256  *    the hardware is halted.
1257  * @controller: the handle to the controller object to stop.
1258  * @timeout: This parameter specifies the number of milliseconds in which the
1259  *    stop operation should complete.
1260  *
1261  * The controller must be in the STARTED or STOPPED state. Indicate if the
1262  * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
1263  * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
1264  * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
1265  * controller is not either in the STARTED or STOPPED states.
1266  */
1267 static enum sci_status scic_controller_stop(struct scic_sds_controller *scic,
1268 					    u32 timeout)
1269 {
1270 	if (scic->state_machine.current_state_id !=
1271 	    SCI_BASE_CONTROLLER_STATE_READY) {
1272 		dev_warn(scic_to_dev(scic),
1273 			 "SCIC Controller stop operation requested in "
1274 			 "invalid state\n");
1275 		return SCI_FAILURE_INVALID_STATE;
1276 	}
1277 
1278 	isci_timer_start(scic->timeout_timer, timeout);
1279 	sci_base_state_machine_change_state(&scic->state_machine,
1280 					    SCI_BASE_CONTROLLER_STATE_STOPPING);
1281 	return SCI_SUCCESS;
1282 }
1283 
1284 /**
1285  * scic_controller_reset() - This method will reset the supplied core
1286  *    controller regardless of the state of said controller.  This operation is
1287  *    considered destructive.  In other words, all current operations are wiped
1288  *    out.  No IO completions for outstanding devices occur.  Outstanding IO
1289  *    requests are not aborted or completed at the actual remote device.
1290  * @controller: the handle to the controller object to reset.
1291  *
1292  * Indicate if the controller reset method succeeded or failed in some way.
1293  * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
1294  * the controller reset operation is unable to complete.
1295  */
1296 static enum sci_status scic_controller_reset(struct scic_sds_controller *scic)
1297 {
1298 	switch (scic->state_machine.current_state_id) {
1299 	case SCI_BASE_CONTROLLER_STATE_RESET:
1300 	case SCI_BASE_CONTROLLER_STATE_READY:
1301 	case SCI_BASE_CONTROLLER_STATE_STOPPED:
1302 	case SCI_BASE_CONTROLLER_STATE_FAILED:
1303 		/*
1304 		 * The reset operation is not a graceful cleanup, just
1305 		 * perform the state transition.
1306 		 */
1307 		sci_base_state_machine_change_state(&scic->state_machine,
1308 				SCI_BASE_CONTROLLER_STATE_RESETTING);
1309 		return SCI_SUCCESS;
1310 	default:
1311 		dev_warn(scic_to_dev(scic),
1312 			 "SCIC Controller reset operation requested in "
1313 			 "invalid state\n");
1314 		return SCI_FAILURE_INVALID_STATE;
1315 	}
1316 }
1317 
1318 void isci_host_deinit(struct isci_host *ihost)
1319 {
1320 	int i;
1321 
1322 	isci_host_change_state(ihost, isci_stopping);
1323 	for (i = 0; i < SCI_MAX_PORTS; i++) {
1324 		struct isci_port *iport = &ihost->ports[i];
1325 		struct isci_remote_device *idev, *d;
1326 
1327 		list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
1328 			isci_remote_device_change_state(idev, isci_stopping);
1329 			isci_remote_device_stop(ihost, idev);
1330 		}
1331 	}
1332 
1333 	set_bit(IHOST_STOP_PENDING, &ihost->flags);
1334 
1335 	spin_lock_irq(&ihost->scic_lock);
1336 	scic_controller_stop(&ihost->sci, SCIC_CONTROLLER_STOP_TIMEOUT);
1337 	spin_unlock_irq(&ihost->scic_lock);
1338 
1339 	wait_for_stop(ihost);
1340 	scic_controller_reset(&ihost->sci);
1341 	isci_timer_list_destroy(ihost);
1342 }
1343 
1344 static void __iomem *scu_base(struct isci_host *isci_host)
1345 {
1346 	struct pci_dev *pdev = isci_host->pdev;
1347 	int id = isci_host->id;
1348 
1349 	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
1350 }
1351 
1352 static void __iomem *smu_base(struct isci_host *isci_host)
1353 {
1354 	struct pci_dev *pdev = isci_host->pdev;
1355 	int id = isci_host->id;
1356 
1357 	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
1358 }
1359 
1360 static void isci_user_parameters_get(
1361 		struct isci_host *isci_host,
1362 		union scic_user_parameters *scic_user_params)
1363 {
1364 	struct scic_sds_user_parameters *u = &scic_user_params->sds1;
1365 	int i;
1366 
1367 	for (i = 0; i < SCI_MAX_PHYS; i++) {
1368 		struct sci_phy_user_params *u_phy = &u->phys[i];
1369 
1370 		u_phy->max_speed_generation = phy_gen;
1371 
1372 		/* we are not exporting these for now */
1373 		u_phy->align_insertion_frequency = 0x7f;
1374 		u_phy->in_connection_align_insertion_frequency = 0xff;
1375 		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
1376 	}
1377 
1378 	u->stp_inactivity_timeout = stp_inactive_to;
1379 	u->ssp_inactivity_timeout = ssp_inactive_to;
1380 	u->stp_max_occupancy_timeout = stp_max_occ_to;
1381 	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
1382 	u->no_outbound_task_timeout = no_outbound_task_to;
1383 	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
1384 }
1385 
1386 static void scic_sds_controller_initial_state_enter(void *object)
1387 {
1388 	struct scic_sds_controller *scic = object;
1389 
1390 	sci_base_state_machine_change_state(&scic->state_machine,
1391 			SCI_BASE_CONTROLLER_STATE_RESET);
1392 }
1393 
1394 static inline void scic_sds_controller_starting_state_exit(void *object)
1395 {
1396 	struct scic_sds_controller *scic = object;
1397 
1398 	isci_timer_stop(scic->timeout_timer);
1399 }
1400 
1401 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
1402 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
1403 #define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
1404 #define INTERRUPT_COALESCE_NUMBER_MAX                        256
1405 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
1406 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28
1407 
1408 /**
1409  * scic_controller_set_interrupt_coalescence() - This method allows the user to
1410  *    configure the interrupt coalescence.
1411  * @controller: This parameter represents the handle to the controller object
1412  *    for which its interrupt coalesce register is overridden.
1413  * @coalesce_number: Used to control the number of entries in the Completion
1414  *    Queue before an interrupt is generated. If the number of entries exceed
1415  *    this number, an interrupt will be generated. The valid range of the input
1416  *    is [0, 256]. A setting of 0 results in coalescing being disabled.
1417  * @coalesce_timeout: Timeout value in microseconds. The valid range of the
1418  *    input is [0, 2700000] . A setting of 0 is allowed and results in no
1419  *    interrupt coalescing timeout.
1420  *
1421  * Indicate if the user successfully set the interrupt coalesce parameters.
1422  * SCI_SUCCESS The user successfully updated the interrutp coalescence.
1423  * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
1424  */
1425 static enum sci_status scic_controller_set_interrupt_coalescence(
1426 	struct scic_sds_controller *scic_controller,
1427 	u32 coalesce_number,
1428 	u32 coalesce_timeout)
1429 {
1430 	u8 timeout_encode = 0;
1431 	u32 min = 0;
1432 	u32 max = 0;
1433 
1434 	/* Check if the input parameters fall in the range. */
1435 	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
1436 		return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1437 
1438 	/*
1439 	 *  Defined encoding for interrupt coalescing timeout:
1440 	 *              Value   Min      Max     Units
1441 	 *              -----   ---      ---     -----
1442 	 *              0       -        -       Disabled
1443 	 *              1       13.3     20.0    ns
1444 	 *              2       26.7     40.0
1445 	 *              3       53.3     80.0
1446 	 *              4       106.7    160.0
1447 	 *              5       213.3    320.0
1448 	 *              6       426.7    640.0
1449 	 *              7       853.3    1280.0
1450 	 *              8       1.7      2.6     us
1451 	 *              9       3.4      5.1
1452 	 *              10      6.8      10.2
1453 	 *              11      13.7     20.5
1454 	 *              12      27.3     41.0
1455 	 *              13      54.6     81.9
1456 	 *              14      109.2    163.8
1457 	 *              15      218.5    327.7
1458 	 *              16      436.9    655.4
1459 	 *              17      873.8    1310.7
1460 	 *              18      1.7      2.6     ms
1461 	 *              19      3.5      5.2
1462 	 *              20      7.0      10.5
1463 	 *              21      14.0     21.0
1464 	 *              22      28.0     41.9
1465 	 *              23      55.9     83.9
1466 	 *              24      111.8    167.8
1467 	 *              25      223.7    335.5
1468 	 *              26      447.4    671.1
1469 	 *              27      894.8    1342.2
1470 	 *              28      1.8      2.7     s
1471 	 *              Others Undefined */
1472 
1473 	/*
1474 	 * Use the table above to decide the encode of interrupt coalescing timeout
1475 	 * value for register writing. */
1476 	if (coalesce_timeout == 0)
1477 		timeout_encode = 0;
1478 	else{
1479 		/* make the timeout value in unit of (10 ns). */
1480 		coalesce_timeout = coalesce_timeout * 100;
1481 		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
1482 		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
1483 
1484 		/* get the encode of timeout for register writing. */
1485 		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
1486 		      timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
1487 		      timeout_encode++) {
1488 			if (min <= coalesce_timeout &&  max > coalesce_timeout)
1489 				break;
1490 			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
1491 				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
1492 				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
1493 					break;
1494 				else{
1495 					timeout_encode++;
1496 					break;
1497 				}
1498 			} else {
1499 				max = max * 2;
1500 				min = min * 2;
1501 			}
1502 		}
1503 
1504 		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
1505 			/* the value is out of range. */
1506 			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1507 	}
1508 
1509 	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
1510 	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
1511 	       &scic_controller->smu_registers->interrupt_coalesce_control);
1512 
1513 
1514 	scic_controller->interrupt_coalesce_number = (u16)coalesce_number;
1515 	scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100;
1516 
1517 	return SCI_SUCCESS;
1518 }
1519 
1520 
1521 static void scic_sds_controller_ready_state_enter(void *object)
1522 {
1523 	struct scic_sds_controller *scic = object;
1524 
1525 	/* set the default interrupt coalescence number and timeout value. */
1526 	scic_controller_set_interrupt_coalescence(scic, 0x10, 250);
1527 }
1528 
1529 static void scic_sds_controller_ready_state_exit(void *object)
1530 {
1531 	struct scic_sds_controller *scic = object;
1532 
1533 	/* disable interrupt coalescence. */
1534 	scic_controller_set_interrupt_coalescence(scic, 0, 0);
1535 }
1536 
1537 static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic)
1538 {
1539 	u32 index;
1540 	enum sci_status status;
1541 	enum sci_status phy_status;
1542 	struct isci_host *ihost = scic_to_ihost(scic);
1543 
1544 	status = SCI_SUCCESS;
1545 
1546 	for (index = 0; index < SCI_MAX_PHYS; index++) {
1547 		phy_status = scic_sds_phy_stop(&ihost->phys[index].sci);
1548 
1549 		if (phy_status != SCI_SUCCESS &&
1550 		    phy_status != SCI_FAILURE_INVALID_STATE) {
1551 			status = SCI_FAILURE;
1552 
1553 			dev_warn(scic_to_dev(scic),
1554 				 "%s: Controller stop operation failed to stop "
1555 				 "phy %d because of status %d.\n",
1556 				 __func__,
1557 				 ihost->phys[index].sci.phy_index, phy_status);
1558 		}
1559 	}
1560 
1561 	return status;
1562 }
1563 
1564 static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic)
1565 {
1566 	u32 index;
1567 	enum sci_status port_status;
1568 	enum sci_status status = SCI_SUCCESS;
1569 	struct isci_host *ihost = scic_to_ihost(scic);
1570 
1571 	for (index = 0; index < scic->logical_port_entries; index++) {
1572 		struct scic_sds_port *sci_port = &ihost->ports[index].sci;
1573 		scic_sds_port_handler_t stop;
1574 
1575 		stop = sci_port->state_handlers->stop_handler;
1576 		port_status = stop(sci_port);
1577 
1578 		if ((port_status != SCI_SUCCESS) &&
1579 		    (port_status != SCI_FAILURE_INVALID_STATE)) {
1580 			status = SCI_FAILURE;
1581 
1582 			dev_warn(scic_to_dev(scic),
1583 				 "%s: Controller stop operation failed to "
1584 				 "stop port %d because of status %d.\n",
1585 				 __func__,
1586 				 sci_port->logical_port_index,
1587 				 port_status);
1588 		}
1589 	}
1590 
1591 	return status;
1592 }
1593 
1594 static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic)
1595 {
1596 	u32 index;
1597 	enum sci_status status;
1598 	enum sci_status device_status;
1599 
1600 	status = SCI_SUCCESS;
1601 
1602 	for (index = 0; index < scic->remote_node_entries; index++) {
1603 		if (scic->device_table[index] != NULL) {
1604 			/* / @todo What timeout value do we want to provide to this request? */
1605 			device_status = scic_remote_device_stop(scic->device_table[index], 0);
1606 
1607 			if ((device_status != SCI_SUCCESS) &&
1608 			    (device_status != SCI_FAILURE_INVALID_STATE)) {
1609 				dev_warn(scic_to_dev(scic),
1610 					 "%s: Controller stop operation failed "
1611 					 "to stop device 0x%p because of "
1612 					 "status %d.\n",
1613 					 __func__,
1614 					 scic->device_table[index], device_status);
1615 			}
1616 		}
1617 	}
1618 
1619 	return status;
1620 }
1621 
1622 static void scic_sds_controller_stopping_state_enter(void *object)
1623 {
1624 	struct scic_sds_controller *scic = object;
1625 
1626 	/* Stop all of the components for this controller */
1627 	scic_sds_controller_stop_phys(scic);
1628 	scic_sds_controller_stop_ports(scic);
1629 	scic_sds_controller_stop_devices(scic);
1630 }
1631 
1632 static void scic_sds_controller_stopping_state_exit(void *object)
1633 {
1634 	struct scic_sds_controller *scic = object;
1635 
1636 	isci_timer_stop(scic->timeout_timer);
1637 }
1638 
1639 
1640 /**
1641  * scic_sds_controller_reset_hardware() -
1642  *
1643  * This method will reset the controller hardware.
1644  */
1645 static void scic_sds_controller_reset_hardware(struct scic_sds_controller *scic)
1646 {
1647 	/* Disable interrupts so we dont take any spurious interrupts */
1648 	scic_controller_disable_interrupts(scic);
1649 
1650 	/* Reset the SCU */
1651 	writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control);
1652 
1653 	/* Delay for 1ms to before clearing the CQP and UFQPR. */
1654 	udelay(1000);
1655 
1656 	/* The write to the CQGR clears the CQP */
1657 	writel(0x00000000, &scic->smu_registers->completion_queue_get);
1658 
1659 	/* The write to the UFQGP clears the UFQPR */
1660 	writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
1661 }
1662 
1663 static void scic_sds_controller_resetting_state_enter(void *object)
1664 {
1665 	struct scic_sds_controller *scic = object;
1666 
1667 	scic_sds_controller_reset_hardware(scic);
1668 	sci_base_state_machine_change_state(&scic->state_machine,
1669 					    SCI_BASE_CONTROLLER_STATE_RESET);
1670 }
1671 
1672 static const struct sci_base_state scic_sds_controller_state_table[] = {
1673 	[SCI_BASE_CONTROLLER_STATE_INITIAL] = {
1674 		.enter_state = scic_sds_controller_initial_state_enter,
1675 	},
1676 	[SCI_BASE_CONTROLLER_STATE_RESET] = {},
1677 	[SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {},
1678 	[SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {},
1679 	[SCI_BASE_CONTROLLER_STATE_STARTING] = {
1680 		.exit_state  = scic_sds_controller_starting_state_exit,
1681 	},
1682 	[SCI_BASE_CONTROLLER_STATE_READY] = {
1683 		.enter_state = scic_sds_controller_ready_state_enter,
1684 		.exit_state  = scic_sds_controller_ready_state_exit,
1685 	},
1686 	[SCI_BASE_CONTROLLER_STATE_RESETTING] = {
1687 		.enter_state = scic_sds_controller_resetting_state_enter,
1688 	},
1689 	[SCI_BASE_CONTROLLER_STATE_STOPPING] = {
1690 		.enter_state = scic_sds_controller_stopping_state_enter,
1691 		.exit_state = scic_sds_controller_stopping_state_exit,
1692 	},
1693 	[SCI_BASE_CONTROLLER_STATE_STOPPED] = {},
1694 	[SCI_BASE_CONTROLLER_STATE_FAILED] = {}
1695 };
1696 
1697 static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic)
1698 {
1699 	/* these defaults are overridden by the platform / firmware */
1700 	struct isci_host *ihost = scic_to_ihost(scic);
1701 	u16 index;
1702 
1703 	/* Default to APC mode. */
1704 	scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
1705 
1706 	/* Default to APC mode. */
1707 	scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1;
1708 
1709 	/* Default to no SSC operation. */
1710 	scic->oem_parameters.sds1.controller.do_enable_ssc = false;
1711 
1712 	/* Initialize all of the port parameter information to narrow ports. */
1713 	for (index = 0; index < SCI_MAX_PORTS; index++) {
1714 		scic->oem_parameters.sds1.ports[index].phy_mask = 0;
1715 	}
1716 
1717 	/* Initialize all of the phy parameter information. */
1718 	for (index = 0; index < SCI_MAX_PHYS; index++) {
1719 		/* Default to 6G (i.e. Gen 3) for now. */
1720 		scic->user_parameters.sds1.phys[index].max_speed_generation = 3;
1721 
1722 		/* the frequencies cannot be 0 */
1723 		scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f;
1724 		scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff;
1725 		scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
1726 
1727 		/*
1728 		 * Previous Vitesse based expanders had a arbitration issue that
1729 		 * is worked around by having the upper 32-bits of SAS address
1730 		 * with a value greater then the Vitesse company identifier.
1731 		 * Hence, usage of 0x5FCFFFFF. */
1732 		scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id;
1733 		scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF;
1734 	}
1735 
1736 	scic->user_parameters.sds1.stp_inactivity_timeout = 5;
1737 	scic->user_parameters.sds1.ssp_inactivity_timeout = 5;
1738 	scic->user_parameters.sds1.stp_max_occupancy_timeout = 5;
1739 	scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20;
1740 	scic->user_parameters.sds1.no_outbound_task_timeout = 20;
1741 }
1742 
1743 
1744 
1745 /**
1746  * scic_controller_construct() - This method will attempt to construct a
1747  *    controller object utilizing the supplied parameter information.
1748  * @c: This parameter specifies the controller to be constructed.
1749  * @scu_base: mapped base address of the scu registers
1750  * @smu_base: mapped base address of the smu registers
1751  *
1752  * Indicate if the controller was successfully constructed or if it failed in
1753  * some way. SCI_SUCCESS This value is returned if the controller was
1754  * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned
1755  * if the interrupt coalescence timer may cause SAS compliance issues for SMP
1756  * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE
1757  * This value is returned if the controller does not support the supplied type.
1758  * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the
1759  * controller does not support the supplied initialization data version.
1760  */
1761 static enum sci_status scic_controller_construct(struct scic_sds_controller *scic,
1762 					  void __iomem *scu_base,
1763 					  void __iomem *smu_base)
1764 {
1765 	struct isci_host *ihost = scic_to_ihost(scic);
1766 	u8 i;
1767 
1768 	sci_base_state_machine_construct(&scic->state_machine,
1769 		scic, scic_sds_controller_state_table,
1770 		SCI_BASE_CONTROLLER_STATE_INITIAL);
1771 
1772 	sci_base_state_machine_start(&scic->state_machine);
1773 
1774 	scic->scu_registers = scu_base;
1775 	scic->smu_registers = smu_base;
1776 
1777 	scic_sds_port_configuration_agent_construct(&scic->port_agent);
1778 
1779 	/* Construct the ports for this controller */
1780 	for (i = 0; i < SCI_MAX_PORTS; i++)
1781 		scic_sds_port_construct(&ihost->ports[i].sci, i, scic);
1782 	scic_sds_port_construct(&ihost->ports[i].sci, SCIC_SDS_DUMMY_PORT, scic);
1783 
1784 	/* Construct the phys for this controller */
1785 	for (i = 0; i < SCI_MAX_PHYS; i++) {
1786 		/* Add all the PHYs to the dummy port */
1787 		scic_sds_phy_construct(&ihost->phys[i].sci,
1788 				       &ihost->ports[SCI_MAX_PORTS].sci, i);
1789 	}
1790 
1791 	scic->invalid_phy_mask = 0;
1792 
1793 	/* Set the default maximum values */
1794 	scic->completion_event_entries      = SCU_EVENT_COUNT;
1795 	scic->completion_queue_entries      = SCU_COMPLETION_QUEUE_COUNT;
1796 	scic->remote_node_entries           = SCI_MAX_REMOTE_DEVICES;
1797 	scic->logical_port_entries          = SCI_MAX_PORTS;
1798 	scic->task_context_entries          = SCU_IO_REQUEST_COUNT;
1799 	scic->uf_control.buffers.count      = SCU_UNSOLICITED_FRAME_COUNT;
1800 	scic->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT;
1801 
1802 	/* Initialize the User and OEM parameters to default values. */
1803 	scic_sds_controller_set_default_config_parameters(scic);
1804 
1805 	return scic_controller_reset(scic);
1806 }
1807 
1808 int scic_oem_parameters_validate(struct scic_sds_oem_params *oem)
1809 {
1810 	int i;
1811 
1812 	for (i = 0; i < SCI_MAX_PORTS; i++)
1813 		if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
1814 			return -EINVAL;
1815 
1816 	for (i = 0; i < SCI_MAX_PHYS; i++)
1817 		if (oem->phys[i].sas_address.high == 0 &&
1818 		    oem->phys[i].sas_address.low == 0)
1819 			return -EINVAL;
1820 
1821 	if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
1822 		for (i = 0; i < SCI_MAX_PHYS; i++)
1823 			if (oem->ports[i].phy_mask != 0)
1824 				return -EINVAL;
1825 	} else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
1826 		u8 phy_mask = 0;
1827 
1828 		for (i = 0; i < SCI_MAX_PHYS; i++)
1829 			phy_mask |= oem->ports[i].phy_mask;
1830 
1831 		if (phy_mask == 0)
1832 			return -EINVAL;
1833 	} else
1834 		return -EINVAL;
1835 
1836 	if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
1837 		return -EINVAL;
1838 
1839 	return 0;
1840 }
1841 
1842 static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic,
1843 					union scic_oem_parameters *scic_parms)
1844 {
1845 	u32 state = scic->state_machine.current_state_id;
1846 
1847 	if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
1848 	    state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
1849 	    state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
1850 
1851 		if (scic_oem_parameters_validate(&scic_parms->sds1))
1852 			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1853 		scic->oem_parameters.sds1 = scic_parms->sds1;
1854 
1855 		return SCI_SUCCESS;
1856 	}
1857 
1858 	return SCI_FAILURE_INVALID_STATE;
1859 }
1860 
1861 void scic_oem_parameters_get(
1862 	struct scic_sds_controller *scic,
1863 	union scic_oem_parameters *scic_parms)
1864 {
1865 	memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms));
1866 }
1867 
1868 static void scic_sds_controller_timeout_handler(void *_scic)
1869 {
1870 	struct scic_sds_controller *scic = _scic;
1871 	struct isci_host *ihost = scic_to_ihost(scic);
1872 	struct sci_base_state_machine *sm = &scic->state_machine;
1873 
1874 	if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STARTING)
1875 		scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT);
1876 	else if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STOPPING) {
1877 		sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_FAILED);
1878 		isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
1879 	} else	/* / @todo Now what do we want to do in this case? */
1880 		dev_err(scic_to_dev(scic),
1881 			"%s: Controller timer fired when controller was not "
1882 			"in a state being timed.\n",
1883 			__func__);
1884 }
1885 
1886 static enum sci_status scic_sds_controller_initialize_phy_startup(struct scic_sds_controller *scic)
1887 {
1888 	struct isci_host *ihost = scic_to_ihost(scic);
1889 
1890 	scic->phy_startup_timer = isci_timer_create(ihost,
1891 						    scic,
1892 						    scic_sds_controller_phy_startup_timeout_handler);
1893 
1894 	if (scic->phy_startup_timer == NULL)
1895 		return SCI_FAILURE_INSUFFICIENT_RESOURCES;
1896 	else {
1897 		scic->next_phy_to_start = 0;
1898 		scic->phy_startup_timer_pending = false;
1899 	}
1900 
1901 	return SCI_SUCCESS;
1902 }
1903 
1904 static void scic_sds_controller_power_control_timer_start(struct scic_sds_controller *scic)
1905 {
1906 	isci_timer_start(scic->power_control.timer,
1907 			 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1908 
1909 	scic->power_control.timer_started = true;
1910 }
1911 
1912 static void scic_sds_controller_power_control_timer_stop(struct scic_sds_controller *scic)
1913 {
1914 	if (scic->power_control.timer_started) {
1915 		isci_timer_stop(scic->power_control.timer);
1916 		scic->power_control.timer_started = false;
1917 	}
1918 }
1919 
1920 static void scic_sds_controller_power_control_timer_restart(struct scic_sds_controller *scic)
1921 {
1922 	scic_sds_controller_power_control_timer_stop(scic);
1923 	scic_sds_controller_power_control_timer_start(scic);
1924 }
1925 
1926 static void scic_sds_controller_power_control_timer_handler(
1927 	void *controller)
1928 {
1929 	struct scic_sds_controller *scic;
1930 
1931 	scic = (struct scic_sds_controller *)controller;
1932 
1933 	scic->power_control.phys_granted_power = 0;
1934 
1935 	if (scic->power_control.phys_waiting == 0) {
1936 		scic->power_control.timer_started = false;
1937 	} else {
1938 		struct scic_sds_phy *sci_phy = NULL;
1939 		u8 i;
1940 
1941 		for (i = 0;
1942 		     (i < SCI_MAX_PHYS)
1943 		     && (scic->power_control.phys_waiting != 0);
1944 		     i++) {
1945 			if (scic->power_control.requesters[i] != NULL) {
1946 				if (scic->power_control.phys_granted_power <
1947 				    scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
1948 					sci_phy = scic->power_control.requesters[i];
1949 					scic->power_control.requesters[i] = NULL;
1950 					scic->power_control.phys_waiting--;
1951 					scic->power_control.phys_granted_power++;
1952 					scic_sds_phy_consume_power_handler(sci_phy);
1953 				} else {
1954 					break;
1955 				}
1956 			}
1957 		}
1958 
1959 		/*
1960 		 * It doesn't matter if the power list is empty, we need to start the
1961 		 * timer in case another phy becomes ready.
1962 		 */
1963 		scic_sds_controller_power_control_timer_start(scic);
1964 	}
1965 }
1966 
1967 /**
1968  * This method inserts the phy in the stagger spinup control queue.
1969  * @scic:
1970  *
1971  *
1972  */
1973 void scic_sds_controller_power_control_queue_insert(
1974 	struct scic_sds_controller *scic,
1975 	struct scic_sds_phy *sci_phy)
1976 {
1977 	BUG_ON(sci_phy == NULL);
1978 
1979 	if (scic->power_control.phys_granted_power <
1980 	    scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
1981 		scic->power_control.phys_granted_power++;
1982 		scic_sds_phy_consume_power_handler(sci_phy);
1983 
1984 		/*
1985 		 * stop and start the power_control timer. When the timer fires, the
1986 		 * no_of_phys_granted_power will be set to 0
1987 		 */
1988 		scic_sds_controller_power_control_timer_restart(scic);
1989 	} else {
1990 		/* Add the phy in the waiting list */
1991 		scic->power_control.requesters[sci_phy->phy_index] = sci_phy;
1992 		scic->power_control.phys_waiting++;
1993 	}
1994 }
1995 
1996 /**
1997  * This method removes the phy from the stagger spinup control queue.
1998  * @scic:
1999  *
2000  *
2001  */
2002 void scic_sds_controller_power_control_queue_remove(
2003 	struct scic_sds_controller *scic,
2004 	struct scic_sds_phy *sci_phy)
2005 {
2006 	BUG_ON(sci_phy == NULL);
2007 
2008 	if (scic->power_control.requesters[sci_phy->phy_index] != NULL) {
2009 		scic->power_control.phys_waiting--;
2010 	}
2011 
2012 	scic->power_control.requesters[sci_phy->phy_index] = NULL;
2013 }
2014 
2015 #define AFE_REGISTER_WRITE_DELAY 10
2016 
2017 /* Initialize the AFE for this phy index. We need to read the AFE setup from
2018  * the OEM parameters
2019  */
2020 static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic)
2021 {
2022 	const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
2023 	u32 afe_status;
2024 	u32 phy_id;
2025 
2026 	/* Clear DFX Status registers */
2027 	writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0);
2028 	udelay(AFE_REGISTER_WRITE_DELAY);
2029 
2030 	if (is_b0()) {
2031 		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
2032 		 * Timer, PM Stagger Timer */
2033 		writel(0x0007BFFF, &scic->scu_registers->afe.afe_pmsn_master_control2);
2034 		udelay(AFE_REGISTER_WRITE_DELAY);
2035 	}
2036 
2037 	/* Configure bias currents to normal */
2038 	if (is_a0())
2039 		writel(0x00005500, &scic->scu_registers->afe.afe_bias_control);
2040 	else if (is_a2())
2041 		writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control);
2042 	else if (is_b0())
2043 		writel(0x00005F00, &scic->scu_registers->afe.afe_bias_control);
2044 
2045 	udelay(AFE_REGISTER_WRITE_DELAY);
2046 
2047 	/* Enable PLL */
2048 	if (is_b0())
2049 		writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0);
2050 	else
2051 		writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0);
2052 
2053 	udelay(AFE_REGISTER_WRITE_DELAY);
2054 
2055 	/* Wait for the PLL to lock */
2056 	do {
2057 		afe_status = readl(&scic->scu_registers->afe.afe_common_block_status);
2058 		udelay(AFE_REGISTER_WRITE_DELAY);
2059 	} while ((afe_status & 0x00001000) == 0);
2060 
2061 	if (is_a0() || is_a2()) {
2062 		/* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
2063 		writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0);
2064 		udelay(AFE_REGISTER_WRITE_DELAY);
2065 	}
2066 
2067 	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
2068 		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
2069 
2070 		if (is_b0()) {
2071 			 /* Configure transmitter SSC parameters */
2072 			writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
2073 			udelay(AFE_REGISTER_WRITE_DELAY);
2074 		} else {
2075 			/*
2076 			 * All defaults, except the Receive Word Alignament/Comma Detect
2077 			 * Enable....(0xe800) */
2078 			writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
2079 			udelay(AFE_REGISTER_WRITE_DELAY);
2080 
2081 			writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
2082 			udelay(AFE_REGISTER_WRITE_DELAY);
2083 		}
2084 
2085 		/*
2086 		 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
2087 		 * & increase TX int & ext bias 20%....(0xe85c) */
2088 		if (is_a0())
2089 			writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2090 		else if (is_a2())
2091 			writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2092 		else {
2093 			 /* Power down TX and RX (PWRDNTX and PWRDNRX) */
2094 			writel(0x000003d7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2095 			udelay(AFE_REGISTER_WRITE_DELAY);
2096 
2097 			/*
2098 			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
2099 			 * & increase TX int & ext bias 20%....(0xe85c) */
2100 			writel(0x000003d4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2101 		}
2102 		udelay(AFE_REGISTER_WRITE_DELAY);
2103 
2104 		if (is_a0() || is_a2()) {
2105 			/* Enable TX equalization (0xe824) */
2106 			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
2107 			udelay(AFE_REGISTER_WRITE_DELAY);
2108 		}
2109 
2110 		/*
2111 		 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
2112 		 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
2113 		writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
2114 		udelay(AFE_REGISTER_WRITE_DELAY);
2115 
2116 		/* Leave DFE/FFE on */
2117 		if (is_a0())
2118 			writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2119 		else if (is_a2())
2120 			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2121 		else {
2122 			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2123 			udelay(AFE_REGISTER_WRITE_DELAY);
2124 			/* Enable TX equalization (0xe824) */
2125 			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
2126 		}
2127 		udelay(AFE_REGISTER_WRITE_DELAY);
2128 
2129 		writel(oem_phy->afe_tx_amp_control0,
2130 			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
2131 		udelay(AFE_REGISTER_WRITE_DELAY);
2132 
2133 		writel(oem_phy->afe_tx_amp_control1,
2134 			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
2135 		udelay(AFE_REGISTER_WRITE_DELAY);
2136 
2137 		writel(oem_phy->afe_tx_amp_control2,
2138 			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
2139 		udelay(AFE_REGISTER_WRITE_DELAY);
2140 
2141 		writel(oem_phy->afe_tx_amp_control3,
2142 			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
2143 		udelay(AFE_REGISTER_WRITE_DELAY);
2144 	}
2145 
2146 	/* Transfer control to the PEs */
2147 	writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0);
2148 	udelay(AFE_REGISTER_WRITE_DELAY);
2149 }
2150 
2151 static enum sci_status scic_controller_set_mode(struct scic_sds_controller *scic,
2152 						enum sci_controller_mode operating_mode)
2153 {
2154 	enum sci_status status          = SCI_SUCCESS;
2155 
2156 	if ((scic->state_machine.current_state_id ==
2157 				SCI_BASE_CONTROLLER_STATE_INITIALIZING) ||
2158 	    (scic->state_machine.current_state_id ==
2159 				SCI_BASE_CONTROLLER_STATE_INITIALIZED)) {
2160 		switch (operating_mode) {
2161 		case SCI_MODE_SPEED:
2162 			scic->remote_node_entries      = SCI_MAX_REMOTE_DEVICES;
2163 			scic->task_context_entries     = SCU_IO_REQUEST_COUNT;
2164 			scic->uf_control.buffers.count =
2165 				SCU_UNSOLICITED_FRAME_COUNT;
2166 			scic->completion_event_entries = SCU_EVENT_COUNT;
2167 			scic->completion_queue_entries =
2168 				SCU_COMPLETION_QUEUE_COUNT;
2169 			break;
2170 
2171 		case SCI_MODE_SIZE:
2172 			scic->remote_node_entries      = SCI_MIN_REMOTE_DEVICES;
2173 			scic->task_context_entries     = SCI_MIN_IO_REQUESTS;
2174 			scic->uf_control.buffers.count =
2175 				SCU_MIN_UNSOLICITED_FRAMES;
2176 			scic->completion_event_entries = SCU_MIN_EVENTS;
2177 			scic->completion_queue_entries =
2178 				SCU_MIN_COMPLETION_QUEUE_ENTRIES;
2179 			break;
2180 
2181 		default:
2182 			status = SCI_FAILURE_INVALID_PARAMETER_VALUE;
2183 			break;
2184 		}
2185 	} else
2186 		status = SCI_FAILURE_INVALID_STATE;
2187 
2188 	return status;
2189 }
2190 
2191 static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic)
2192 {
2193 	struct isci_host *ihost = scic_to_ihost(scic);
2194 	scic->power_control.timer = isci_timer_create(ihost,
2195 						      scic,
2196 					scic_sds_controller_power_control_timer_handler);
2197 
2198 	memset(scic->power_control.requesters, 0,
2199 	       sizeof(scic->power_control.requesters));
2200 
2201 	scic->power_control.phys_waiting = 0;
2202 	scic->power_control.phys_granted_power = 0;
2203 }
2204 
2205 static enum sci_status scic_controller_initialize(struct scic_sds_controller *scic)
2206 {
2207 	struct sci_base_state_machine *sm = &scic->state_machine;
2208 	enum sci_status result = SCI_SUCCESS;
2209 	struct isci_host *ihost = scic_to_ihost(scic);
2210 	u32 index, state;
2211 
2212 	if (scic->state_machine.current_state_id !=
2213 	    SCI_BASE_CONTROLLER_STATE_RESET) {
2214 		dev_warn(scic_to_dev(scic),
2215 			 "SCIC Controller initialize operation requested "
2216 			 "in invalid state\n");
2217 		return SCI_FAILURE_INVALID_STATE;
2218 	}
2219 
2220 	sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_INITIALIZING);
2221 
2222 	scic->timeout_timer = isci_timer_create(ihost, scic,
2223 						scic_sds_controller_timeout_handler);
2224 
2225 	scic_sds_controller_initialize_phy_startup(scic);
2226 
2227 	scic_sds_controller_initialize_power_control(scic);
2228 
2229 	/*
2230 	 * There is nothing to do here for B0 since we do not have to
2231 	 * program the AFE registers.
2232 	 * / @todo The AFE settings are supposed to be correct for the B0 but
2233 	 * /       presently they seem to be wrong. */
2234 	scic_sds_controller_afe_initialization(scic);
2235 
2236 	if (result == SCI_SUCCESS) {
2237 		u32 status;
2238 		u32 terminate_loop;
2239 
2240 		/* Take the hardware out of reset */
2241 		writel(0, &scic->smu_registers->soft_reset_control);
2242 
2243 		/*
2244 		 * / @todo Provide meaningfull error code for hardware failure
2245 		 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
2246 		result = SCI_FAILURE;
2247 		terminate_loop = 100;
2248 
2249 		while (terminate_loop-- && (result != SCI_SUCCESS)) {
2250 			/* Loop until the hardware reports success */
2251 			udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
2252 			status = readl(&scic->smu_registers->control_status);
2253 
2254 			if ((status & SCU_RAM_INIT_COMPLETED) ==
2255 					SCU_RAM_INIT_COMPLETED)
2256 				result = SCI_SUCCESS;
2257 		}
2258 	}
2259 
2260 	if (result == SCI_SUCCESS) {
2261 		u32 max_supported_ports;
2262 		u32 max_supported_devices;
2263 		u32 max_supported_io_requests;
2264 		u32 device_context_capacity;
2265 
2266 		/*
2267 		 * Determine what are the actaul device capacities that the
2268 		 * hardware will support */
2269 		device_context_capacity =
2270 			readl(&scic->smu_registers->device_context_capacity);
2271 
2272 
2273 		max_supported_ports = smu_dcc_get_max_ports(device_context_capacity);
2274 		max_supported_devices = smu_dcc_get_max_remote_node_context(device_context_capacity);
2275 		max_supported_io_requests = smu_dcc_get_max_task_context(device_context_capacity);
2276 
2277 		/*
2278 		 * Make all PEs that are unassigned match up with the
2279 		 * logical ports
2280 		 */
2281 		for (index = 0; index < max_supported_ports; index++) {
2282 			struct scu_port_task_scheduler_group_registers __iomem
2283 				*ptsg = &scic->scu_registers->peg0.ptsg;
2284 
2285 			writel(index, &ptsg->protocol_engine[index]);
2286 		}
2287 
2288 		/* Record the smaller of the two capacity values */
2289 		scic->logical_port_entries =
2290 			min(max_supported_ports, scic->logical_port_entries);
2291 
2292 		scic->task_context_entries =
2293 			min(max_supported_io_requests,
2294 			    scic->task_context_entries);
2295 
2296 		scic->remote_node_entries =
2297 			min(max_supported_devices, scic->remote_node_entries);
2298 
2299 		/*
2300 		 * Now that we have the correct hardware reported minimum values
2301 		 * build the MDL for the controller.  Default to a performance
2302 		 * configuration.
2303 		 */
2304 		scic_controller_set_mode(scic, SCI_MODE_SPEED);
2305 	}
2306 
2307 	/* Initialize hardware PCI Relaxed ordering in DMA engines */
2308 	if (result == SCI_SUCCESS) {
2309 		u32 dma_configuration;
2310 
2311 		/* Configure the payload DMA */
2312 		dma_configuration =
2313 			readl(&scic->scu_registers->sdma.pdma_configuration);
2314 		dma_configuration |=
2315 			SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2316 		writel(dma_configuration,
2317 			&scic->scu_registers->sdma.pdma_configuration);
2318 
2319 		/* Configure the control DMA */
2320 		dma_configuration =
2321 			readl(&scic->scu_registers->sdma.cdma_configuration);
2322 		dma_configuration |=
2323 			SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2324 		writel(dma_configuration,
2325 			&scic->scu_registers->sdma.cdma_configuration);
2326 	}
2327 
2328 	/*
2329 	 * Initialize the PHYs before the PORTs because the PHY registers
2330 	 * are accessed during the port initialization.
2331 	 */
2332 	if (result == SCI_SUCCESS) {
2333 		/* Initialize the phys */
2334 		for (index = 0;
2335 		     (result == SCI_SUCCESS) && (index < SCI_MAX_PHYS);
2336 		     index++) {
2337 			result = scic_sds_phy_initialize(
2338 				&ihost->phys[index].sci,
2339 				&scic->scu_registers->peg0.pe[index].tl,
2340 				&scic->scu_registers->peg0.pe[index].ll);
2341 		}
2342 	}
2343 
2344 	if (result == SCI_SUCCESS) {
2345 		/* Initialize the logical ports */
2346 		for (index = 0;
2347 		     (index < scic->logical_port_entries) &&
2348 		     (result == SCI_SUCCESS);
2349 		     index++) {
2350 			result = scic_sds_port_initialize(
2351 				&ihost->ports[index].sci,
2352 				&scic->scu_registers->peg0.ptsg.port[index],
2353 				&scic->scu_registers->peg0.ptsg.protocol_engine,
2354 				&scic->scu_registers->peg0.viit[index]);
2355 		}
2356 	}
2357 
2358 	if (result == SCI_SUCCESS)
2359 		result = scic_sds_port_configuration_agent_initialize(
2360 				scic,
2361 				&scic->port_agent);
2362 
2363 	/* Advance the controller state machine */
2364 	if (result == SCI_SUCCESS)
2365 		state = SCI_BASE_CONTROLLER_STATE_INITIALIZED;
2366 	else
2367 		state = SCI_BASE_CONTROLLER_STATE_FAILED;
2368 	sci_base_state_machine_change_state(sm, state);
2369 
2370 	return result;
2371 }
2372 
2373 static enum sci_status scic_user_parameters_set(
2374 	struct scic_sds_controller *scic,
2375 	union scic_user_parameters *scic_parms)
2376 {
2377 	u32 state = scic->state_machine.current_state_id;
2378 
2379 	if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
2380 	    state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
2381 	    state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
2382 		u16 index;
2383 
2384 		/*
2385 		 * Validate the user parameters.  If they are not legal, then
2386 		 * return a failure.
2387 		 */
2388 		for (index = 0; index < SCI_MAX_PHYS; index++) {
2389 			struct sci_phy_user_params *user_phy;
2390 
2391 			user_phy = &scic_parms->sds1.phys[index];
2392 
2393 			if (!((user_phy->max_speed_generation <=
2394 						SCIC_SDS_PARM_MAX_SPEED) &&
2395 			      (user_phy->max_speed_generation >
2396 						SCIC_SDS_PARM_NO_SPEED)))
2397 				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2398 
2399 			if (user_phy->in_connection_align_insertion_frequency <
2400 					3)
2401 				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2402 
2403 			if ((user_phy->in_connection_align_insertion_frequency <
2404 						3) ||
2405 			    (user_phy->align_insertion_frequency == 0) ||
2406 			    (user_phy->
2407 				notify_enable_spin_up_insertion_frequency ==
2408 						0))
2409 				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2410 		}
2411 
2412 		if ((scic_parms->sds1.stp_inactivity_timeout == 0) ||
2413 		    (scic_parms->sds1.ssp_inactivity_timeout == 0) ||
2414 		    (scic_parms->sds1.stp_max_occupancy_timeout == 0) ||
2415 		    (scic_parms->sds1.ssp_max_occupancy_timeout == 0) ||
2416 		    (scic_parms->sds1.no_outbound_task_timeout == 0))
2417 			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2418 
2419 		memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms));
2420 
2421 		return SCI_SUCCESS;
2422 	}
2423 
2424 	return SCI_FAILURE_INVALID_STATE;
2425 }
2426 
2427 static int scic_controller_mem_init(struct scic_sds_controller *scic)
2428 {
2429 	struct device *dev = scic_to_dev(scic);
2430 	dma_addr_t dma_handle;
2431 	enum sci_status result;
2432 
2433 	scic->completion_queue = dmam_alloc_coherent(dev,
2434 			scic->completion_queue_entries * sizeof(u32),
2435 			&dma_handle, GFP_KERNEL);
2436 	if (!scic->completion_queue)
2437 		return -ENOMEM;
2438 
2439 	writel(lower_32_bits(dma_handle),
2440 		&scic->smu_registers->completion_queue_lower);
2441 	writel(upper_32_bits(dma_handle),
2442 		&scic->smu_registers->completion_queue_upper);
2443 
2444 	scic->remote_node_context_table = dmam_alloc_coherent(dev,
2445 			scic->remote_node_entries *
2446 				sizeof(union scu_remote_node_context),
2447 			&dma_handle, GFP_KERNEL);
2448 	if (!scic->remote_node_context_table)
2449 		return -ENOMEM;
2450 
2451 	writel(lower_32_bits(dma_handle),
2452 		&scic->smu_registers->remote_node_context_lower);
2453 	writel(upper_32_bits(dma_handle),
2454 		&scic->smu_registers->remote_node_context_upper);
2455 
2456 	scic->task_context_table = dmam_alloc_coherent(dev,
2457 			scic->task_context_entries *
2458 				sizeof(struct scu_task_context),
2459 			&dma_handle, GFP_KERNEL);
2460 	if (!scic->task_context_table)
2461 		return -ENOMEM;
2462 
2463 	writel(lower_32_bits(dma_handle),
2464 		&scic->smu_registers->host_task_table_lower);
2465 	writel(upper_32_bits(dma_handle),
2466 		&scic->smu_registers->host_task_table_upper);
2467 
2468 	result = scic_sds_unsolicited_frame_control_construct(scic);
2469 	if (result)
2470 		return result;
2471 
2472 	/*
2473 	 * Inform the silicon as to the location of the UF headers and
2474 	 * address table.
2475 	 */
2476 	writel(lower_32_bits(scic->uf_control.headers.physical_address),
2477 		&scic->scu_registers->sdma.uf_header_base_address_lower);
2478 	writel(upper_32_bits(scic->uf_control.headers.physical_address),
2479 		&scic->scu_registers->sdma.uf_header_base_address_upper);
2480 
2481 	writel(lower_32_bits(scic->uf_control.address_table.physical_address),
2482 		&scic->scu_registers->sdma.uf_address_table_lower);
2483 	writel(upper_32_bits(scic->uf_control.address_table.physical_address),
2484 		&scic->scu_registers->sdma.uf_address_table_upper);
2485 
2486 	return 0;
2487 }
2488 
2489 int isci_host_init(struct isci_host *isci_host)
2490 {
2491 	int err = 0, i;
2492 	enum sci_status status;
2493 	union scic_oem_parameters oem;
2494 	union scic_user_parameters scic_user_params;
2495 	struct isci_pci_info *pci_info = to_pci_info(isci_host->pdev);
2496 
2497 	isci_timer_list_construct(isci_host);
2498 
2499 	spin_lock_init(&isci_host->state_lock);
2500 	spin_lock_init(&isci_host->scic_lock);
2501 	spin_lock_init(&isci_host->queue_lock);
2502 	init_waitqueue_head(&isci_host->eventq);
2503 
2504 	isci_host_change_state(isci_host, isci_starting);
2505 	isci_host->can_queue = ISCI_CAN_QUEUE_VAL;
2506 
2507 	status = scic_controller_construct(&isci_host->sci, scu_base(isci_host),
2508 					   smu_base(isci_host));
2509 
2510 	if (status != SCI_SUCCESS) {
2511 		dev_err(&isci_host->pdev->dev,
2512 			"%s: scic_controller_construct failed - status = %x\n",
2513 			__func__,
2514 			status);
2515 		return -ENODEV;
2516 	}
2517 
2518 	isci_host->sas_ha.dev = &isci_host->pdev->dev;
2519 	isci_host->sas_ha.lldd_ha = isci_host;
2520 
2521 	/*
2522 	 * grab initial values stored in the controller object for OEM and USER
2523 	 * parameters
2524 	 */
2525 	isci_user_parameters_get(isci_host, &scic_user_params);
2526 	status = scic_user_parameters_set(&isci_host->sci,
2527 					  &scic_user_params);
2528 	if (status != SCI_SUCCESS) {
2529 		dev_warn(&isci_host->pdev->dev,
2530 			 "%s: scic_user_parameters_set failed\n",
2531 			 __func__);
2532 		return -ENODEV;
2533 	}
2534 
2535 	scic_oem_parameters_get(&isci_host->sci, &oem);
2536 
2537 	/* grab any OEM parameters specified in orom */
2538 	if (pci_info->orom) {
2539 		status = isci_parse_oem_parameters(&oem,
2540 						   pci_info->orom,
2541 						   isci_host->id);
2542 		if (status != SCI_SUCCESS) {
2543 			dev_warn(&isci_host->pdev->dev,
2544 				 "parsing firmware oem parameters failed\n");
2545 			return -EINVAL;
2546 		}
2547 	}
2548 
2549 	status = scic_oem_parameters_set(&isci_host->sci, &oem);
2550 	if (status != SCI_SUCCESS) {
2551 		dev_warn(&isci_host->pdev->dev,
2552 				"%s: scic_oem_parameters_set failed\n",
2553 				__func__);
2554 		return -ENODEV;
2555 	}
2556 
2557 	tasklet_init(&isci_host->completion_tasklet,
2558 		     isci_host_completion_routine, (unsigned long)isci_host);
2559 
2560 	INIT_LIST_HEAD(&isci_host->requests_to_complete);
2561 	INIT_LIST_HEAD(&isci_host->requests_to_errorback);
2562 
2563 	spin_lock_irq(&isci_host->scic_lock);
2564 	status = scic_controller_initialize(&isci_host->sci);
2565 	spin_unlock_irq(&isci_host->scic_lock);
2566 	if (status != SCI_SUCCESS) {
2567 		dev_warn(&isci_host->pdev->dev,
2568 			 "%s: scic_controller_initialize failed -"
2569 			 " status = 0x%x\n",
2570 			 __func__, status);
2571 		return -ENODEV;
2572 	}
2573 
2574 	err = scic_controller_mem_init(&isci_host->sci);
2575 	if (err)
2576 		return err;
2577 
2578 	isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev,
2579 					       sizeof(struct isci_request),
2580 					       SLAB_HWCACHE_ALIGN, 0);
2581 
2582 	if (!isci_host->dma_pool)
2583 		return -ENOMEM;
2584 
2585 	for (i = 0; i < SCI_MAX_PORTS; i++)
2586 		isci_port_init(&isci_host->ports[i], isci_host, i);
2587 
2588 	for (i = 0; i < SCI_MAX_PHYS; i++)
2589 		isci_phy_init(&isci_host->phys[i], isci_host, i);
2590 
2591 	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
2592 		struct isci_remote_device *idev = &isci_host->devices[i];
2593 
2594 		INIT_LIST_HEAD(&idev->reqs_in_process);
2595 		INIT_LIST_HEAD(&idev->node);
2596 		spin_lock_init(&idev->state_lock);
2597 	}
2598 
2599 	return 0;
2600 }
2601 
2602 void scic_sds_controller_link_up(struct scic_sds_controller *scic,
2603 		struct scic_sds_port *port, struct scic_sds_phy *phy)
2604 {
2605 	switch (scic->state_machine.current_state_id) {
2606 	case SCI_BASE_CONTROLLER_STATE_STARTING:
2607 		scic_sds_controller_phy_timer_stop(scic);
2608 		scic->port_agent.link_up_handler(scic, &scic->port_agent,
2609 						 port, phy);
2610 		scic_sds_controller_start_next_phy(scic);
2611 		break;
2612 	case SCI_BASE_CONTROLLER_STATE_READY:
2613 		scic->port_agent.link_up_handler(scic, &scic->port_agent,
2614 						 port, phy);
2615 		break;
2616 	default:
2617 		dev_dbg(scic_to_dev(scic),
2618 			"%s: SCIC Controller linkup event from phy %d in "
2619 			"unexpected state %d\n", __func__, phy->phy_index,
2620 			scic->state_machine.current_state_id);
2621 	}
2622 }
2623 
2624 void scic_sds_controller_link_down(struct scic_sds_controller *scic,
2625 		struct scic_sds_port *port, struct scic_sds_phy *phy)
2626 {
2627 	switch (scic->state_machine.current_state_id) {
2628 	case SCI_BASE_CONTROLLER_STATE_STARTING:
2629 	case SCI_BASE_CONTROLLER_STATE_READY:
2630 		scic->port_agent.link_down_handler(scic, &scic->port_agent,
2631 						   port, phy);
2632 		break;
2633 	default:
2634 		dev_dbg(scic_to_dev(scic),
2635 			"%s: SCIC Controller linkdown event from phy %d in "
2636 			"unexpected state %d\n",
2637 			__func__,
2638 			phy->phy_index,
2639 			scic->state_machine.current_state_id);
2640 	}
2641 }
2642 
2643 /**
2644  * This is a helper method to determine if any remote devices on this
2645  * controller are still in the stopping state.
2646  *
2647  */
2648 static bool scic_sds_controller_has_remote_devices_stopping(
2649 	struct scic_sds_controller *controller)
2650 {
2651 	u32 index;
2652 
2653 	for (index = 0; index < controller->remote_node_entries; index++) {
2654 		if ((controller->device_table[index] != NULL) &&
2655 		   (controller->device_table[index]->state_machine.current_state_id
2656 		    == SCI_BASE_REMOTE_DEVICE_STATE_STOPPING))
2657 			return true;
2658 	}
2659 
2660 	return false;
2661 }
2662 
2663 /**
2664  * This method is called by the remote device to inform the controller
2665  * object that the remote device has stopped.
2666  */
2667 void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic,
2668 					       struct scic_sds_remote_device *sci_dev)
2669 {
2670 	if (scic->state_machine.current_state_id !=
2671 	    SCI_BASE_CONTROLLER_STATE_STOPPING) {
2672 		dev_dbg(scic_to_dev(scic),
2673 			"SCIC Controller 0x%p remote device stopped event "
2674 			"from device 0x%p in unexpected state %d\n",
2675 			scic, sci_dev,
2676 			scic->state_machine.current_state_id);
2677 		return;
2678 	}
2679 
2680 	if (!scic_sds_controller_has_remote_devices_stopping(scic)) {
2681 		sci_base_state_machine_change_state(&scic->state_machine,
2682 				SCI_BASE_CONTROLLER_STATE_STOPPED);
2683 	}
2684 }
2685 
2686 /**
2687  * This method will write to the SCU PCP register the request value. The method
2688  *    is used to suspend/resume ports, devices, and phys.
2689  * @scic:
2690  *
2691  *
2692  */
2693 void scic_sds_controller_post_request(
2694 	struct scic_sds_controller *scic,
2695 	u32 request)
2696 {
2697 	dev_dbg(scic_to_dev(scic),
2698 		"%s: SCIC Controller 0x%p post request 0x%08x\n",
2699 		__func__,
2700 		scic,
2701 		request);
2702 
2703 	writel(request, &scic->smu_registers->post_context_port);
2704 }
2705 
2706 /**
2707  * This method will copy the soft copy of the task context into the physical
2708  *    memory accessible by the controller.
2709  * @scic: This parameter specifies the controller for which to copy
2710  *    the task context.
2711  * @sci_req: This parameter specifies the request for which the task
2712  *    context is being copied.
2713  *
2714  * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
2715  * the physical memory version of the task context. Thus, all subsequent
2716  * updates to the task context are performed in the TC table (i.e. DMAable
2717  * memory). none
2718  */
2719 void scic_sds_controller_copy_task_context(
2720 	struct scic_sds_controller *scic,
2721 	struct scic_sds_request *sci_req)
2722 {
2723 	struct scu_task_context *task_context_buffer;
2724 
2725 	task_context_buffer = scic_sds_controller_get_task_context_buffer(
2726 		scic, sci_req->io_tag);
2727 
2728 	memcpy(task_context_buffer,
2729 	       sci_req->task_context_buffer,
2730 	       offsetof(struct scu_task_context, sgl_snapshot_ac));
2731 
2732 	/*
2733 	 * Now that the soft copy of the TC has been copied into the TC
2734 	 * table accessible by the silicon.  Thus, any further changes to
2735 	 * the TC (e.g. TC termination) occur in the appropriate location. */
2736 	sci_req->task_context_buffer = task_context_buffer;
2737 }
2738 
2739 /**
2740  * This method returns the task context buffer for the given io tag.
2741  * @scic:
2742  * @io_tag:
2743  *
2744  * struct scu_task_context*
2745  */
2746 struct scu_task_context *scic_sds_controller_get_task_context_buffer(
2747 	struct scic_sds_controller *scic,
2748 	u16 io_tag
2749 	) {
2750 	u16 task_index = scic_sds_io_tag_get_index(io_tag);
2751 
2752 	if (task_index < scic->task_context_entries) {
2753 		return &scic->task_context_table[task_index];
2754 	}
2755 
2756 	return NULL;
2757 }
2758 
2759 struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
2760 					     u16 io_tag)
2761 {
2762 	u16 task_index;
2763 	u16 task_sequence;
2764 
2765 	task_index = scic_sds_io_tag_get_index(io_tag);
2766 
2767 	if (task_index  < scic->task_context_entries) {
2768 		if (scic->io_request_table[task_index] != NULL) {
2769 			task_sequence = scic_sds_io_tag_get_sequence(io_tag);
2770 
2771 			if (task_sequence == scic->io_request_sequence[task_index]) {
2772 				return scic->io_request_table[task_index];
2773 			}
2774 		}
2775 	}
2776 
2777 	return NULL;
2778 }
2779 
2780 /**
2781  * This method allocates remote node index and the reserves the remote node
2782  *    context space for use. This method can fail if there are no more remote
2783  *    node index available.
2784  * @scic: This is the controller object which contains the set of
2785  *    free remote node ids
2786  * @sci_dev: This is the device object which is requesting the a remote node
2787  *    id
2788  * @node_id: This is the remote node id that is assinged to the device if one
2789  *    is available
2790  *
2791  * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there are no available remote
2792  * node index available.
2793  */
2794 enum sci_status scic_sds_controller_allocate_remote_node_context(
2795 	struct scic_sds_controller *scic,
2796 	struct scic_sds_remote_device *sci_dev,
2797 	u16 *node_id)
2798 {
2799 	u16 node_index;
2800 	u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);
2801 
2802 	node_index = scic_sds_remote_node_table_allocate_remote_node(
2803 		&scic->available_remote_nodes, remote_node_count
2804 		);
2805 
2806 	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
2807 		scic->device_table[node_index] = sci_dev;
2808 
2809 		*node_id = node_index;
2810 
2811 		return SCI_SUCCESS;
2812 	}
2813 
2814 	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2815 }
2816 
2817 /**
2818  * This method frees the remote node index back to the available pool.  Once
2819  *    this is done the remote node context buffer is no longer valid and can
2820  *    not be used.
2821  * @scic:
2822  * @sci_dev:
2823  * @node_id:
2824  *
2825  */
2826 void scic_sds_controller_free_remote_node_context(
2827 	struct scic_sds_controller *scic,
2828 	struct scic_sds_remote_device *sci_dev,
2829 	u16 node_id)
2830 {
2831 	u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);
2832 
2833 	if (scic->device_table[node_id] == sci_dev) {
2834 		scic->device_table[node_id] = NULL;
2835 
2836 		scic_sds_remote_node_table_release_remote_node_index(
2837 			&scic->available_remote_nodes, remote_node_count, node_id
2838 			);
2839 	}
2840 }
2841 
2842 /**
2843  * This method returns the union scu_remote_node_context for the specified remote
2844  *    node id.
2845  * @scic:
2846  * @node_id:
2847  *
2848  * union scu_remote_node_context*
2849  */
2850 union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
2851 	struct scic_sds_controller *scic,
2852 	u16 node_id
2853 	) {
2854 	if (
2855 		(node_id < scic->remote_node_entries)
2856 		&& (scic->device_table[node_id] != NULL)
2857 		) {
2858 		return &scic->remote_node_context_table[node_id];
2859 	}
2860 
2861 	return NULL;
2862 }
2863 
2864 /**
2865  *
2866  * @resposne_buffer: This is the buffer into which the D2H register FIS will be
2867  *    constructed.
2868  * @frame_header: This is the frame header returned by the hardware.
2869  * @frame_buffer: This is the frame buffer returned by the hardware.
2870  *
2871  * This method will combind the frame header and frame buffer to create a SATA
2872  * D2H register FIS none
2873  */
2874 void scic_sds_controller_copy_sata_response(
2875 	void *response_buffer,
2876 	void *frame_header,
2877 	void *frame_buffer)
2878 {
2879 	memcpy(response_buffer, frame_header, sizeof(u32));
2880 
2881 	memcpy(response_buffer + sizeof(u32),
2882 	       frame_buffer,
2883 	       sizeof(struct dev_to_host_fis) - sizeof(u32));
2884 }
2885 
2886 /**
2887  * This method releases the frame once this is done the frame is available for
2888  *    re-use by the hardware.  The data contained in the frame header and frame
2889  *    buffer is no longer valid. The UF queue get pointer is only updated if UF
2890  *    control indicates this is appropriate.
2891  * @scic:
2892  * @frame_index:
2893  *
2894  */
2895 void scic_sds_controller_release_frame(
2896 	struct scic_sds_controller *scic,
2897 	u32 frame_index)
2898 {
2899 	if (scic_sds_unsolicited_frame_control_release_frame(
2900 		    &scic->uf_control, frame_index) == true)
2901 		writel(scic->uf_control.get,
2902 			&scic->scu_registers->sdma.unsolicited_frame_get_pointer);
2903 }
2904 
2905 /**
2906  * scic_controller_start_io() - This method is called by the SCI user to
2907  *    send/start an IO request. If the method invocation is successful, then
2908  *    the IO request has been queued to the hardware for processing.
2909  * @controller: the handle to the controller object for which to start an IO
2910  *    request.
2911  * @remote_device: the handle to the remote device object for which to start an
2912  *    IO request.
2913  * @io_request: the handle to the io request object to start.
2914  * @io_tag: This parameter specifies a previously allocated IO tag that the
2915  *    user desires to be utilized for this request. This parameter is optional.
2916  *     The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value
2917  *    for this parameter.
2918  *
2919  * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
2920  * to ensure that each of the methods that may allocate or free available IO
2921  * tags are handled in a mutually exclusive manner.  This method is one of said
2922  * methods requiring proper critical code section protection (e.g. semaphore,
2923  * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags.  As a
2924  * result, it is expected the user will have set the NCQ tag field in the host
2925  * to device register FIS prior to calling this method.  There is also a
2926  * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking
2927  * the scic_controller_start_io() method. scic_controller_allocate_tag() for
2928  * more information on allocating a tag. Indicate if the controller
2929  * successfully started the IO request. SCI_SUCCESS if the IO request was
2930  * successfully started. Determine the failure situations and return values.
2931  */
2932 enum sci_status scic_controller_start_io(
2933 	struct scic_sds_controller *scic,
2934 	struct scic_sds_remote_device *rdev,
2935 	struct scic_sds_request *req,
2936 	u16 io_tag)
2937 {
2938 	enum sci_status status;
2939 
2940 	if (scic->state_machine.current_state_id !=
2941 	    SCI_BASE_CONTROLLER_STATE_READY) {
2942 		dev_warn(scic_to_dev(scic), "invalid state to start I/O");
2943 		return SCI_FAILURE_INVALID_STATE;
2944 	}
2945 
2946 	status = scic_sds_remote_device_start_io(scic, rdev, req);
2947 	if (status != SCI_SUCCESS)
2948 		return status;
2949 
2950 	scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
2951 	scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req));
2952 	return SCI_SUCCESS;
2953 }
2954 
2955 /**
2956  * scic_controller_terminate_request() - This method is called by the SCI Core
2957  *    user to terminate an ongoing (i.e. started) core IO request.  This does
2958  *    not abort the IO request at the target, but rather removes the IO request
2959  *    from the host controller.
2960  * @controller: the handle to the controller object for which to terminate a
2961  *    request.
2962  * @remote_device: the handle to the remote device object for which to
2963  *    terminate a request.
2964  * @request: the handle to the io or task management request object to
2965  *    terminate.
2966  *
2967  * Indicate if the controller successfully began the terminate process for the
2968  * IO request. SCI_SUCCESS if the terminate process was successfully started
2969  * for the request. Determine the failure situations and return values.
2970  */
2971 enum sci_status scic_controller_terminate_request(
2972 	struct scic_sds_controller *scic,
2973 	struct scic_sds_remote_device *rdev,
2974 	struct scic_sds_request *req)
2975 {
2976 	enum sci_status status;
2977 
2978 	if (scic->state_machine.current_state_id !=
2979 	    SCI_BASE_CONTROLLER_STATE_READY) {
2980 		dev_warn(scic_to_dev(scic),
2981 			 "invalid state to terminate request\n");
2982 		return SCI_FAILURE_INVALID_STATE;
2983 	}
2984 
2985 	status = scic_sds_io_request_terminate(req);
2986 	if (status != SCI_SUCCESS)
2987 		return status;
2988 
2989 	/*
2990 	 * Utilize the original post context command and or in the POST_TC_ABORT
2991 	 * request sub-type.
2992 	 */
2993 	scic_sds_controller_post_request(scic,
2994 		scic_sds_request_get_post_context(req) |
2995 		SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2996 	return SCI_SUCCESS;
2997 }
2998 
2999 /**
3000  * scic_controller_complete_io() - This method will perform core specific
3001  *    completion operations for an IO request.  After this method is invoked,
3002  *    the user should consider the IO request as invalid until it is properly
3003  *    reused (i.e. re-constructed).
3004  * @controller: The handle to the controller object for which to complete the
3005  *    IO request.
3006  * @remote_device: The handle to the remote device object for which to complete
3007  *    the IO request.
3008  * @io_request: the handle to the io request object to complete.
3009  *
3010  * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
3011  * to ensure that each of the methods that may allocate or free available IO
3012  * tags are handled in a mutually exclusive manner.  This method is one of said
3013  * methods requiring proper critical code section protection (e.g. semaphore,
3014  * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
3015  * Core user, using the scic_controller_allocate_io_tag() method, then it is
3016  * the responsibility of the caller to invoke the scic_controller_free_io_tag()
3017  * method to free the tag (i.e. this method will not free the IO tag). Indicate
3018  * if the controller successfully completed the IO request. SCI_SUCCESS if the
3019  * completion process was successful.
3020  */
3021 enum sci_status scic_controller_complete_io(
3022 	struct scic_sds_controller *scic,
3023 	struct scic_sds_remote_device *rdev,
3024 	struct scic_sds_request *request)
3025 {
3026 	enum sci_status status;
3027 	u16 index;
3028 
3029 	switch (scic->state_machine.current_state_id) {
3030 	case SCI_BASE_CONTROLLER_STATE_STOPPING:
3031 		/* XXX: Implement this function */
3032 		return SCI_FAILURE;
3033 	case SCI_BASE_CONTROLLER_STATE_READY:
3034 		status = scic_sds_remote_device_complete_io(scic, rdev, request);
3035 		if (status != SCI_SUCCESS)
3036 			return status;
3037 
3038 		index = scic_sds_io_tag_get_index(request->io_tag);
3039 		scic->io_request_table[index] = NULL;
3040 		return SCI_SUCCESS;
3041 	default:
3042 		dev_warn(scic_to_dev(scic), "invalid state to complete I/O");
3043 		return SCI_FAILURE_INVALID_STATE;
3044 	}
3045 
3046 }
3047 
3048 enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req)
3049 {
3050 	struct scic_sds_controller *scic = sci_req->owning_controller;
3051 
3052 	if (scic->state_machine.current_state_id !=
3053 	    SCI_BASE_CONTROLLER_STATE_READY) {
3054 		dev_warn(scic_to_dev(scic), "invalid state to continue I/O");
3055 		return SCI_FAILURE_INVALID_STATE;
3056 	}
3057 
3058 	scic->io_request_table[scic_sds_io_tag_get_index(sci_req->io_tag)] = sci_req;
3059 	scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(sci_req));
3060 	return SCI_SUCCESS;
3061 }
3062 
3063 /**
3064  * scic_controller_start_task() - This method is called by the SCIC user to
3065  *    send/start a framework task management request.
3066  * @controller: the handle to the controller object for which to start the task
3067  *    management request.
3068  * @remote_device: the handle to the remote device object for which to start
3069  *    the task management request.
3070  * @task_request: the handle to the task request object to start.
3071  * @io_tag: This parameter specifies a previously allocated IO tag that the
3072  *    user desires to be utilized for this request.  Note this not the io_tag
3073  *    of the request being managed.  It is to be utilized for the task request
3074  *    itself. This parameter is optional.  The user is allowed to supply
3075  *    SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter.
3076  *
3077  * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
3078  * to ensure that each of the methods that may allocate or free available IO
3079  * tags are handled in a mutually exclusive manner.  This method is one of said
3080  * methods requiring proper critical code section protection (e.g. semaphore,
3081  * spin-lock, etc.). - The user must synchronize this task with completion
3082  * queue processing.  If they are not synchronized then it is possible for the
3083  * io requests that are being managed by the task request can complete before
3084  * starting the task request. scic_controller_allocate_tag() for more
3085  * information on allocating a tag. Indicate if the controller successfully
3086  * started the IO request. SCI_TASK_SUCCESS if the task request was
3087  * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is
3088  * returned if there is/are task(s) outstanding that require termination or
3089  * completion before this request can succeed.
3090  */
3091 enum sci_task_status scic_controller_start_task(
3092 	struct scic_sds_controller *scic,
3093 	struct scic_sds_remote_device *rdev,
3094 	struct scic_sds_request *req,
3095 	u16 task_tag)
3096 {
3097 	enum sci_status status;
3098 
3099 	if (scic->state_machine.current_state_id !=
3100 	    SCI_BASE_CONTROLLER_STATE_READY) {
3101 		dev_warn(scic_to_dev(scic),
3102 			 "%s: SCIC Controller starting task from invalid "
3103 			 "state\n",
3104 			 __func__);
3105 		return SCI_TASK_FAILURE_INVALID_STATE;
3106 	}
3107 
3108 	status = scic_sds_remote_device_start_task(scic, rdev, req);
3109 	switch (status) {
3110 	case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
3111 		scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
3112 
3113 		/*
3114 		 * We will let framework know this task request started successfully,
3115 		 * although core is still woring on starting the request (to post tc when
3116 		 * RNC is resumed.)
3117 		 */
3118 		return SCI_SUCCESS;
3119 	case SCI_SUCCESS:
3120 		scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
3121 
3122 		scic_sds_controller_post_request(scic,
3123 			scic_sds_request_get_post_context(req));
3124 		break;
3125 	default:
3126 		break;
3127 	}
3128 
3129 	return status;
3130 }
3131 
3132 /**
3133  * scic_controller_allocate_io_tag() - This method will allocate a tag from the
3134  *    pool of free IO tags. Direct allocation of IO tags by the SCI Core user
3135  *    is optional. The scic_controller_start_io() method will allocate an IO
3136  *    tag if this method is not utilized and the tag is not supplied to the IO
3137  *    construct routine.  Direct allocation of IO tags may provide additional
3138  *    performance improvements in environments capable of supporting this usage
3139  *    model.  Additionally, direct allocation of IO tags also provides
3140  *    additional flexibility to the SCI Core user.  Specifically, the user may
3141  *    retain IO tags across the lives of multiple IO requests.
3142  * @controller: the handle to the controller object for which to allocate the
3143  *    tag.
3144  *
3145  * IO tags are a protected resource.  It is incumbent upon the SCI Core user to
3146  * ensure that each of the methods that may allocate or free available IO tags
3147  * are handled in a mutually exclusive manner.  This method is one of said
3148  * methods requiring proper critical code section protection (e.g. semaphore,
3149  * spin-lock, etc.). An unsigned integer representing an available IO tag.
3150  * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no
3151  * currently available tags to be allocated. All return other values indicate a
3152  * legitimate tag.
3153  */
3154 u16 scic_controller_allocate_io_tag(
3155 	struct scic_sds_controller *scic)
3156 {
3157 	u16 task_context;
3158 	u16 sequence_count;
3159 
3160 	if (!sci_pool_empty(scic->tci_pool)) {
3161 		sci_pool_get(scic->tci_pool, task_context);
3162 
3163 		sequence_count = scic->io_request_sequence[task_context];
3164 
3165 		return scic_sds_io_tag_construct(sequence_count, task_context);
3166 	}
3167 
3168 	return SCI_CONTROLLER_INVALID_IO_TAG;
3169 }
3170 
3171 /**
3172  * scic_controller_free_io_tag() - This method will free an IO tag to the pool
3173  *    of free IO tags. This method provides the SCI Core user more flexibility
3174  *    with regards to IO tags.  The user may desire to keep an IO tag after an
3175  *    IO request has completed, because they plan on re-using the tag for a
3176  *    subsequent IO request.  This method is only legal if the tag was
3177  *    allocated via scic_controller_allocate_io_tag().
3178  * @controller: This parameter specifies the handle to the controller object
3179  *    for which to free/return the tag.
3180  * @io_tag: This parameter represents the tag to be freed to the pool of
3181  *    available tags.
3182  *
3183  * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
3184  * to ensure that each of the methods that may allocate or free available IO
3185  * tags are handled in a mutually exclusive manner.  This method is one of said
3186  * methods requiring proper critical code section protection (e.g. semaphore,
3187  * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
3188  * Core user, using the scic_controller_allocate_io_tag() method, then it is
3189  * the responsibility of the caller to invoke this method to free the tag. This
3190  * method returns an indication of whether the tag was successfully put back
3191  * (freed) to the pool of available tags. SCI_SUCCESS This return value
3192  * indicates the tag was successfully placed into the pool of available IO
3193  * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag
3194  * is not a valid IO tag value.
3195  */
3196 enum sci_status scic_controller_free_io_tag(
3197 	struct scic_sds_controller *scic,
3198 	u16 io_tag)
3199 {
3200 	u16 sequence;
3201 	u16 index;
3202 
3203 	BUG_ON(io_tag == SCI_CONTROLLER_INVALID_IO_TAG);
3204 
3205 	sequence = scic_sds_io_tag_get_sequence(io_tag);
3206 	index    = scic_sds_io_tag_get_index(io_tag);
3207 
3208 	if (!sci_pool_full(scic->tci_pool)) {
3209 		if (sequence == scic->io_request_sequence[index]) {
3210 			scic_sds_io_sequence_increment(
3211 				scic->io_request_sequence[index]);
3212 
3213 			sci_pool_put(scic->tci_pool, index);
3214 
3215 			return SCI_SUCCESS;
3216 		}
3217 	}
3218 
3219 	return SCI_FAILURE_INVALID_IO_TAG;
3220 }
3221 
3222 
3223