xref: /linux/drivers/scsi/isci/port.c (revision aec499c75cf8e0b599be4d559e6922b613085f8f)
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55 
56 #include "isci.h"
57 #include "port.h"
58 #include "request.h"
59 
60 #define SCIC_SDS_PORT_HARD_RESET_TIMEOUT  (1000)
61 #define SCU_DUMMY_INDEX    (0xFFFF)
62 
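/*
 * PORT_STATES is an x-macro list of the port states; defining C(a) to
 * stringify its argument lets the expansion below double as a lookup
 * table of state names indexed by enum sci_port_states.
 */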
63 #undef C
64 #define C(a) (#a)
65 static const char *port_state_name(enum sci_port_states state)
66 {
67 	static const char * const strings[] = PORT_STATES;
68 
69 	return strings[state];
70 }
71 #undef C
72 
73 static struct device *sciport_to_dev(struct isci_port *iport)
74 {
75 	int i = iport->physical_port_index;
76 	struct isci_port *table;
77 	struct isci_host *ihost;
78 
79 	if (i == SCIC_SDS_DUMMY_PORT)
80 		i = SCI_MAX_PORTS+1;
81 
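	/* Step back from this port to the first element of the owning
	 * controller's ports[] array, then recover the isci_host (and its
	 * PCI device) with container_of().
	 */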
82 	table = iport - i;
83 	ihost = container_of(table, typeof(*ihost), ports[0]);
84 
85 	return &ihost->pdev->dev;
86 }
87 
88 static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
89 {
90 	u8 index;
91 
92 	proto->all = 0;
93 	for (index = 0; index < SCI_MAX_PHYS; index++) {
94 		struct isci_phy *iphy = iport->phy_table[index];
95 
96 		if (!iphy)
97 			continue;
98 		sci_phy_get_protocols(iphy, proto);
99 	}
100 }
101 
102 static u32 sci_port_get_phys(struct isci_port *iport)
103 {
104 	u32 index;
105 	u32 mask;
106 
107 	mask = 0;
108 	for (index = 0; index < SCI_MAX_PHYS; index++)
109 		if (iport->phy_table[index])
110 			mask |= (1 << index);
111 
112 	return mask;
113 }
114 
115 /**
116  * sci_port_get_properties() - This method simply returns the properties
117  *    regarding the port, such as: physical index, protocols, sas address, etc.
118  * @iport: This parameter specifies the port for which to retrieve the
119  *    properties.
120  * @prop: This parameter specifies the properties structure into which to
121  *    copy the requested information.
122  *
123  * Return: SCI_SUCCESS if the specified port was valid and the requested
124  * properties were copied.  SCI_FAILURE_INVALID_PORT if the specified port
125  * is not valid; when this value is returned, no data is copied to the
126  * properties output parameter.
127  */
128 enum sci_status sci_port_get_properties(struct isci_port *iport,
129 						struct sci_port_properties *prop)
130 {
131 	if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
132 		return SCI_FAILURE_INVALID_PORT;
133 
134 	prop->index = iport->logical_port_index;
135 	prop->phy_mask = sci_port_get_phys(iport);
136 	sci_port_get_sas_address(iport, &prop->local.sas_address);
137 	sci_port_get_protocols(iport, &prop->local.protocols);
138 	sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
139 
140 	return SCI_SUCCESS;
141 }
142 
143 static void sci_port_bcn_enable(struct isci_port *iport)
144 {
145 	struct isci_phy *iphy;
146 	u32 val;
147 	int i;
148 
149 	for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
150 		iphy = iport->phy_table[i];
151 		if (!iphy)
152 			continue;
153 		val = readl(&iphy->link_layer_registers->link_layer_control);
154 		/* clear the bit by writing 1. */
155 		writel(val, &iphy->link_layer_registers->link_layer_control);
156 	}
157 }
158 
159 static void isci_port_bc_change_received(struct isci_host *ihost,
160 					 struct isci_port *iport,
161 					 struct isci_phy *iphy)
162 {
163 	dev_dbg(&ihost->pdev->dev,
164 		"%s: isci_phy = %p, sas_phy = %p\n",
165 		__func__, iphy, &iphy->sas_phy);
166 
167 	sas_notify_port_event(&iphy->sas_phy,
168 			      PORTE_BROADCAST_RCVD, GFP_ATOMIC);
169 	sci_port_bcn_enable(iport);
170 }
171 
172 static void isci_port_link_up(struct isci_host *isci_host,
173 			      struct isci_port *iport,
174 			      struct isci_phy *iphy)
175 {
176 	unsigned long flags;
177 	struct sci_port_properties properties;
178 	unsigned long success = true;
179 
180 	dev_dbg(&isci_host->pdev->dev,
181 		"%s: isci_port = %p\n",
182 		__func__, iport);
183 
184 	spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
185 
186 	sci_port_get_properties(iport, &properties);
187 
188 	if (iphy->protocol == SAS_PROTOCOL_SATA) {
189 		u64 attached_sas_address;
190 
191 		iphy->sas_phy.oob_mode = SATA_OOB_MODE;
192 		iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);
193 
194 		/*
195 		 * For direct-attached SATA devices, the SCI core will
196 		 * automagically assign a SAS address to the end device
197 		 * for the purpose of creating a port. This SAS address
198 		 * will not be the same as assigned to the PHY and needs
199 		 * to be obtained from struct sci_port_properties properties.
200 		 */
201 		attached_sas_address = properties.remote.sas_address.high;
202 		attached_sas_address <<= 32;
203 		attached_sas_address |= properties.remote.sas_address.low;
204 		swab64s(&attached_sas_address);
205 
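		/* libsas stores attached_sas_addr as a byte array in SAS wire
		 * (big endian) order, hence the swab64s() above before the
		 * copy.
		 */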
206 		memcpy(&iphy->sas_phy.attached_sas_addr,
207 		       &attached_sas_address, sizeof(attached_sas_address));
208 	} else if (iphy->protocol == SAS_PROTOCOL_SSP) {
209 		iphy->sas_phy.oob_mode = SAS_OOB_MODE;
210 		iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
211 
212 		/* Copy the attached SAS address from the IAF */
213 		memcpy(iphy->sas_phy.attached_sas_addr,
214 		       iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
215 	} else {
216 		dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
217 		success = false;
218 	}
219 
220 	iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);
221 
222 	spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
223 
224 	/* Notify libsas that we have an address frame, if indeed
225 	 * we've found an SSP, SMP, or STP target */
226 	if (success)
227 		sas_notify_port_event(&iphy->sas_phy,
228 				      PORTE_BYTES_DMAED, GFP_ATOMIC);
229 }
230 
231 
232 /**
233  * isci_port_link_down() - This function is called by the sci core when a link
234  *    becomes inactive.
235  * @isci_host: This parameter specifies the isci host object.
236  * @isci_phy: This parameter specifies the isci phy whose link has gone down.
237  * @isci_port: This parameter specifies the isci port containing that phy.
238  *
239  */
240 static void isci_port_link_down(struct isci_host *isci_host,
241 				struct isci_phy *isci_phy,
242 				struct isci_port *isci_port)
243 {
244 	struct isci_remote_device *isci_device;
245 
246 	dev_dbg(&isci_host->pdev->dev,
247 		"%s: isci_port = %p\n", __func__, isci_port);
248 
249 	if (isci_port) {
250 
251 		/* check to see if this is the last phy on this port. */
252 		if (isci_phy->sas_phy.port &&
253 		    isci_phy->sas_phy.port->num_phys == 1) {
254 			/* change the state for all devices on this port.  The
255 			 * next task sent to this device will be returned as
256 			 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
257 			 * remove the target
258 			 */
259 			list_for_each_entry(isci_device,
260 					    &isci_port->remote_dev_list,
261 					    node) {
262 				dev_dbg(&isci_host->pdev->dev,
263 					"%s: isci_device = %p\n",
264 					__func__, isci_device);
265 				set_bit(IDEV_GONE, &isci_device->flags);
266 			}
267 		}
268 	}
269 
270 	/* Notify libsas of the broken link; this will trigger calls to our
271 	 * isci_port_deformed and isci_dev_gone functions.
272 	 */
273 	sas_phy_disconnected(&isci_phy->sas_phy);
274 	sas_notify_phy_event(&isci_phy->sas_phy,
275 			     PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
276 
277 	dev_dbg(&isci_host->pdev->dev,
278 		"%s: isci_port = %p - Done\n", __func__, isci_port);
279 }
280 
281 static bool is_port_ready_state(enum sci_port_states state)
282 {
283 	switch (state) {
284 	case SCI_PORT_READY:
285 	case SCI_PORT_SUB_WAITING:
286 	case SCI_PORT_SUB_OPERATIONAL:
287 	case SCI_PORT_SUB_CONFIGURING:
288 		return true;
289 	default:
290 		return false;
291 	}
292 }
293 
294 /* flag dummy rnc handling when exiting a ready state */
295 static void port_state_machine_change(struct isci_port *iport,
296 				      enum sci_port_states state)
297 {
298 	struct sci_base_state_machine *sm = &iport->sm;
299 	enum sci_port_states old_state = sm->current_state_id;
300 
301 	if (is_port_ready_state(old_state) && !is_port_ready_state(state))
302 		iport->ready_exit = true;
303 
304 	sci_change_state(sm, state);
305 	iport->ready_exit = false;
306 }
307 
308 /**
309  * isci_port_hard_reset_complete() - This function is called by the sci core
310  *    when the hard reset complete notification has been received.
311  * @isci_port: This parameter specifies the sci port with the active link.
312  * @completion_status: This parameter specifies the core status for the reset
313  *    process.
314  *
315  */
316 static void isci_port_hard_reset_complete(struct isci_port *isci_port,
317 					  enum sci_status completion_status)
318 {
319 	struct isci_host *ihost = isci_port->owning_controller;
320 
321 	dev_dbg(&ihost->pdev->dev,
322 		"%s: isci_port = %p, completion_status=%x\n",
323 		     __func__, isci_port, completion_status);
324 
325 	/* Save the status of the hard reset from the port. */
326 	isci_port->hard_reset_status = completion_status;
327 
328 	if (completion_status != SCI_SUCCESS) {
329 
330 		/* The reset failed.  The port state is now SCI_PORT_FAILED. */
331 		if (isci_port->active_phy_mask == 0) {
332 			int phy_idx = isci_port->last_active_phy;
333 			struct isci_phy *iphy = &ihost->phys[phy_idx];
334 
335 			/* Generate the link down now to the host, since it
336 			 * was intercepted by the hard reset state machine when
337 			 * it really happened.
338 			 */
339 			isci_port_link_down(ihost, iphy, isci_port);
340 		}
341 		/* Advance the port state so that link state changes will be
342 		 * noticed.
343 		 */
344 		port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
345 
346 	}
347 	clear_bit(IPORT_RESET_PENDING, &isci_port->state);
348 	wake_up(&ihost->eventq);
349 
350 }
351 
352 /* This method will return a true value if the specified phy can be assigned
353  * to this port.  The following is the list of allowed phys for each port:
354  *   Port 0 - 3 2 1 0;  Port 1 - 1;  Port 2 - 3 2;  Port 3 - 3.
355  * This method doesn't preclude all configurations.  It merely ensures that a
356  * phy is part of the allowable set of phy identifiers for that port.  For
357  * example, one could assign phy 3 to port 0 and no other phys.  Please refer
358  * to sci_port_is_phy_mask_valid() for information regarding whether the
359  * phy_mask for a port can be supported.
360  *
361  * Returns true if this is a valid phy assignment for the port, false if not.
362  */
363 bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
364 {
365 	struct isci_host *ihost = iport->owning_controller;
366 	struct sci_user_parameters *user = &ihost->user_parameters;
367 
368 	/* Initialize to invalid value. */
369 	u32 existing_phy_index = SCI_MAX_PHYS;
370 	u32 index;
371 
372 	if ((iport->physical_port_index == 1) && (phy_index != 1))
373 		return false;
374 
375 	if (iport->physical_port_index == 3 && phy_index != 3)
376 		return false;
377 
378 	if (iport->physical_port_index == 2 &&
379 	    (phy_index == 0 || phy_index == 1))
380 		return false;
381 
382 	for (index = 0; index < SCI_MAX_PHYS; index++)
383 		if (iport->phy_table[index] && index != phy_index)
384 			existing_phy_index = index;
385 
386 	/* Ensure that all of the phys in the port are capable of
387 	 * operating at the same maximum link rate.
388 	 */
389 	if (existing_phy_index < SCI_MAX_PHYS &&
390 	    user->phys[phy_index].max_speed_generation !=
391 	    user->phys[existing_phy_index].max_speed_generation)
392 		return false;
393 
394 	return true;
395 }
396 
397 /**
398  * sci_port_is_phy_mask_valid()
399  * @iport: This is the port object for which to determine if the phy mask
400  *    can be supported.
401  * @phy_mask: Phy mask belonging to this port
402  *
403  * This method will return a true value if the port's phy mask can be
404  * supported by the SCU.  The valid PHY mask configurations for each port
405  * are: Port 0 - [[3 2] 1] 0;  Port 1 - [1];  Port 2 - [[3] 2];  Port 3 - [3].
406  *
407  * Return: true if this phy mask can be supported by the port, false if it
408  * can not.
409  */
410 static bool sci_port_is_phy_mask_valid(
411 	struct isci_port *iport,
412 	u32 phy_mask)
413 {
414 	if (iport->physical_port_index == 0) {
415 		if (((phy_mask & 0x0F) == 0x0F)
416 		    || ((phy_mask & 0x03) == 0x03)
417 		    || ((phy_mask & 0x01) == 0x01)
418 		    || (phy_mask == 0))
419 			return true;
420 	} else if (iport->physical_port_index == 1) {
421 		if (((phy_mask & 0x02) == 0x02)
422 		    || (phy_mask == 0))
423 			return true;
424 	} else if (iport->physical_port_index == 2) {
425 		if (((phy_mask & 0x0C) == 0x0C)
426 		    || ((phy_mask & 0x04) == 0x04)
427 		    || (phy_mask == 0))
428 			return true;
429 	} else if (iport->physical_port_index == 3) {
430 		if (((phy_mask & 0x08) == 0x08)
431 		    || (phy_mask == 0))
432 			return true;
433 	}
434 
435 	return false;
436 }
437 
438 /*
439  * This method retrieves a currently active (i.e. connected) phy contained in
440  * the port.  Currently, the lowest order phy that is connected is returned.
441  * This method returns a pointer to a struct isci_phy object.  NULL is
442  * returned if there are no currently active (i.e. connected to a remote end
443  * point) phys contained in the port.  Any other value is a pointer to a
444  * struct isci_phy that is active in the port.
445  */
446 static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
447 {
448 	u32 index;
449 	struct isci_phy *iphy;
450 
451 	for (index = 0; index < SCI_MAX_PHYS; index++) {
452 		/* Ensure that the phy is both part of the port and currently
453 		 * connected to the remote end-point.
454 		 */
455 		iphy = iport->phy_table[index];
456 		if (iphy && sci_port_active_phy(iport, iphy))
457 			return iphy;
458 	}
459 
460 	return NULL;
461 }
462 
463 static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
464 {
465 	/* Check to see if we can add this phy to the port.  This requires that
466 	 * the phy is not already part of a port and that the port does not
467 	 * already have a phy assigned to this phy index.
468 	 */
469 	if (!iport->phy_table[iphy->phy_index] &&
470 	    !phy_get_non_dummy_port(iphy) &&
471 	    sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
472 		/* Phy is being added in the stopped state so we are in MPC mode
473 		 * make logical port index = physical port index
474 		 */
475 		iport->logical_port_index = iport->physical_port_index;
476 		iport->phy_table[iphy->phy_index] = iphy;
477 		sci_phy_set_port(iphy, iport);
478 
479 		return SCI_SUCCESS;
480 	}
481 
482 	return SCI_FAILURE;
483 }
484 
485 static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
486 {
487 	/* Make sure that this phy is part of this port */
488 	if (iport->phy_table[iphy->phy_index] == iphy &&
489 	    phy_get_non_dummy_port(iphy) == iport) {
490 		struct isci_host *ihost = iport->owning_controller;
491 
492 		/* Yep it is assigned to this port so remove it */
493 		sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
494 		iport->phy_table[iphy->phy_index] = NULL;
495 		return SCI_SUCCESS;
496 	}
497 
498 	return SCI_FAILURE;
499 }
500 
501 void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
502 {
503 	u32 index;
504 
505 	sas->high = 0;
506 	sas->low  = 0;
507 	for (index = 0; index < SCI_MAX_PHYS; index++)
508 		if (iport->phy_table[index])
509 			sci_phy_get_sas_address(iport->phy_table[index], sas);
510 }
511 
512 void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
513 {
514 	struct isci_phy *iphy;
515 
516 	/*
517 	 * Ensure that the phy is both part of the port and currently
518 	 * connected to the remote end-point.
519 	 */
520 	iphy = sci_port_get_a_connected_phy(iport);
521 	if (iphy) {
522 		if (iphy->protocol != SAS_PROTOCOL_SATA) {
523 			sci_phy_get_attached_sas_address(iphy, sas);
524 		} else {
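			/* Directly attached SATA devices have no SAS address
			 * of their own; manufacture a unique attached address
			 * from the local phy address plus the phy index.
			 */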
525 			sci_phy_get_sas_address(iphy, sas);
526 			sas->low += iphy->phy_index;
527 		}
528 	} else {
529 		sas->high = 0;
530 		sas->low  = 0;
531 	}
532 }
533 
534 /**
535  * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
536  *
537  * @iport: logical port on which we need to create the remote node context
538  * @rni: remote node index for this remote node context.
539  *
540  * This routine will construct a dummy remote node context data structure.
541  * This structure will be posted to the hardware to work around a scheduler
542  * error in the hardware.
543  */
544 static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
545 {
546 	union scu_remote_node_context *rnc;
547 
548 	rnc = &iport->owning_controller->remote_node_context_table[rni];
549 
550 	memset(rnc, 0, sizeof(union scu_remote_node_context));
551 
552 	rnc->ssp.remote_sas_address_hi = 0;
553 	rnc->ssp.remote_sas_address_lo = 0;
554 
555 	rnc->ssp.remote_node_index = rni;
556 	rnc->ssp.remote_node_port_width = 1;
557 	rnc->ssp.logical_port_index = iport->physical_port_index;
558 
559 	rnc->ssp.nexus_loss_timer_enable = false;
560 	rnc->ssp.check_bit = false;
561 	rnc->ssp.is_valid = true;
562 	rnc->ssp.is_remote_node_context = true;
563 	rnc->ssp.function_number = 0;
564 	rnc->ssp.arbitration_wait_time = 0;
565 }
566 
567 /*
568  * Construct a dummy task context data structure.  This
569  * structure will be posted to the hardware to work around a scheduler error
570  * in the hardware.
571  */
572 static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
573 {
574 	struct isci_host *ihost = iport->owning_controller;
575 	struct scu_task_context *task_context;
576 
577 	task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
578 	memset(task_context, 0, sizeof(struct scu_task_context));
579 
580 	task_context->initiator_request = 1;
581 	task_context->connection_rate = 1;
582 	task_context->logical_port_index = iport->physical_port_index;
583 	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
584 	task_context->task_index = ISCI_TAG_TCI(tag);
585 	task_context->valid = SCU_TASK_CONTEXT_VALID;
586 	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
587 	task_context->remote_node_index = iport->reserved_rni;
588 	task_context->do_not_dma_ssp_good_response = 1;
589 	task_context->task_phase = 0x01;
590 }
591 
592 static void sci_port_destroy_dummy_resources(struct isci_port *iport)
593 {
594 	struct isci_host *ihost = iport->owning_controller;
595 
596 	if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
597 		isci_free_tag(ihost, iport->reserved_tag);
598 
599 	if (iport->reserved_rni != SCU_DUMMY_INDEX)
600 		sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
601 								     1, iport->reserved_rni);
602 
603 	iport->reserved_rni = SCU_DUMMY_INDEX;
604 	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
605 }
606 
607 void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
608 {
609 	u8 index;
610 
611 	for (index = 0; index < SCI_MAX_PHYS; index++) {
612 		if (iport->active_phy_mask & (1 << index))
613 			sci_phy_setup_transport(iport->phy_table[index], device_id);
614 	}
615 }
616 
617 static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
618 {
619 	sci_phy_resume(iphy);
620 	iport->enabled_phy_mask |= 1 << iphy->phy_index;
621 }
622 
623 static void sci_port_activate_phy(struct isci_port *iport,
624 				  struct isci_phy *iphy,
625 				  u8 flags)
626 {
627 	struct isci_host *ihost = iport->owning_controller;
628 
629 	if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME))
630 		sci_phy_resume(iphy);
631 
632 	iport->active_phy_mask |= 1 << iphy->phy_index;
633 
634 	sci_controller_clear_invalid_phy(ihost, iphy);
635 
636 	if (flags & PF_NOTIFY)
637 		isci_port_link_up(ihost, iport, iphy);
638 }
639 
640 void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
641 			     bool do_notify_user)
642 {
643 	struct isci_host *ihost = iport->owning_controller;
644 
645 	iport->active_phy_mask &= ~(1 << iphy->phy_index);
646 	iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
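	/* When the last active phy leaves the port, remember it so a failed
	 * hard reset can replay the link-down event for it (see
	 * isci_port_hard_reset_complete()).
	 */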
647 	if (!iport->active_phy_mask)
648 		iport->last_active_phy = iphy->phy_index;
649 
650 	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
651 
652 	/* Re-assign the phy back to the LP as if it were a narrow port for APC
653 	 * mode. For MPC mode, the phy will remain in the port.
654 	 */
655 	if (iport->owning_controller->oem_parameters.controller.mode_type ==
656 		SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
657 		writel(iphy->phy_index,
658 			&iport->port_pe_configuration_register[iphy->phy_index]);
659 
660 	if (do_notify_user == true)
661 		isci_port_link_down(ihost, iphy, iport);
662 }
663 
664 static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
665 {
666 	struct isci_host *ihost = iport->owning_controller;
667 
668 	/*
669 	 * Check to see if we have already reported this link as bad and, if
670 	 * not, go ahead and tell the SCI_USER that we have discovered an
671 	 * invalid link.
672 	 */
673 	if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
674 		ihost->invalid_phy_mask |= 1 << iphy->phy_index;
675 		dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
676 	}
677 }
678 
679 /**
680  * sci_port_general_link_up_handler - phy can be assigned to port?
681  * @iport: sci_port object which has a phy that has gone link up.
682  * @iphy: This is the struct isci_phy object that has gone link up.
683  * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
684  *
685  * Determine if this phy can be assigned to this port.  If the phy is
686  * not a valid PHY for this port then the function will notify the user.
687  * A PHY can only be part of a port if its attached SAS ADDRESS is the
688  * same as that of all other PHYs in the same port.
689  */
690 static void sci_port_general_link_up_handler(struct isci_port *iport,
691 					     struct isci_phy *iphy,
692 					     u8 flags)
693 {
694 	struct sci_sas_address port_sas_address;
695 	struct sci_sas_address phy_sas_address;
696 
697 	sci_port_get_attached_sas_address(iport, &port_sas_address);
698 	sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
699 
700 	/* If the SAS address of the new phy matches the SAS address of
701 	 * other phys in the port OR this is the first phy in the port,
702 	 * then activate the phy and allow it to be used for operations
703 	 * in this port.
704 	 */
705 	if ((phy_sas_address.high == port_sas_address.high &&
706 	     phy_sas_address.low  == port_sas_address.low) ||
707 	    iport->active_phy_mask == 0) {
708 		struct sci_base_state_machine *sm = &iport->sm;
709 
710 		sci_port_activate_phy(iport, iphy, flags);
711 		if (sm->current_state_id == SCI_PORT_RESETTING)
712 			port_state_machine_change(iport, SCI_PORT_READY);
713 	} else
714 		sci_port_invalid_link_up(iport, iphy);
715 }
716 
717 
718 
719 /**
720  * sci_port_is_wide()
721  * This method returns false if the port only has a single phy object assigned.
722  *     If there are no phys or more than one phy then the method will return
723  *    true.
724  * @iport: The port for which the wide port condition is to be checked.
725  *
726  * Return: true if this is a wide port (zero phys or more than one phy),
727  * false if this is a narrow port (exactly one phy).
728  */
729 static bool sci_port_is_wide(struct isci_port *iport)
730 {
731 	u32 index;
732 	u32 phy_count = 0;
733 
734 	for (index = 0; index < SCI_MAX_PHYS; index++) {
735 		if (iport->phy_table[index] != NULL) {
736 			phy_count++;
737 		}
738 	}
739 
740 	return phy_count != 1;
741 }
742 
743 /**
744  * sci_port_link_detected()
745  * This method is called by the PHY object when the link is detected. If the
746  *    port wants the PHY to continue on to the link up state then the port
747  *    layer must return true.  If the port object returns false the phy object
748  *    must halt its attempt to go link up.
749  * @iport: The port associated with the phy object.
750  * @iphy: The phy object that is trying to go link up.
751  *
752  * Return: true if the phy object can continue on to the link up (ready)
753  * state, false if it can not.  This notification is in place for wide
754  * ports and direct attached phys.  Since there are no wide ported SATA
755  * devices this could become an invalid port configuration.
757  */
758 bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
759 {
760 	if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
761 	    (iphy->protocol == SAS_PROTOCOL_SATA)) {
762 		if (sci_port_is_wide(iport)) {
763 			sci_port_invalid_link_up(iport, iphy);
764 			return false;
765 		} else {
766 			struct isci_host *ihost = iport->owning_controller;
767 			struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
768 			writel(iphy->phy_index,
769 			       &dst_port->port_pe_configuration_register[iphy->phy_index]);
770 		}
771 	}
772 
773 	return true;
774 }
775 
776 static void port_timeout(struct timer_list *t)
777 {
778 	struct sci_timer *tmr = from_timer(tmr, t, timer);
779 	struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
780 	struct isci_host *ihost = iport->owning_controller;
781 	unsigned long flags;
782 	u32 current_state;
783 
784 	spin_lock_irqsave(&ihost->scic_lock, flags);
785 
786 	if (tmr->cancel)
787 		goto done;
788 
789 	current_state = iport->sm.current_state_id;
790 
791 	if (current_state == SCI_PORT_RESETTING) {
792 		/* if the port is still in the resetting state then the timeout
793 		 * fired before the reset completed.
794 		 */
795 		port_state_machine_change(iport, SCI_PORT_FAILED);
796 	} else if (current_state == SCI_PORT_STOPPED) {
797 		/* if the port is stopped then the start request failed.  In this
798 		 * case stay in the stopped state.
799 		 */
800 		dev_err(sciport_to_dev(iport),
801 			"%s: SCIC Port 0x%p failed to stop before timeout.\n",
802 			__func__,
803 			iport);
804 	} else if (current_state == SCI_PORT_STOPPING) {
805 		dev_dbg(sciport_to_dev(iport),
806 			"%s: port%d: stop complete timeout\n",
807 			__func__, iport->physical_port_index);
808 	} else {
809 		/* The port is in the ready state and we have a timer
810 		 * reporting a timeout; this should not happen.
811 		 */
812 		dev_err(sciport_to_dev(iport),
813 			"%s: SCIC Port 0x%p is processing a timeout operation "
814 			"in state %d.\n", __func__, iport, current_state);
815 	}
816 
817 done:
818 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
819 }
820 
821 /* --------------------------------------------------------------------------- */
822 
823 /*
824  * This function updates the hardware's VIIT entry for this port.
825  */
826 static void sci_port_update_viit_entry(struct isci_port *iport)
827 {
828 	struct sci_sas_address sas_address;
829 
830 	sci_port_get_sas_address(iport, &sas_address);
831 
832 	writel(sas_address.high,
833 		&iport->viit_registers->initiator_sas_address_hi);
834 	writel(sas_address.low,
835 		&iport->viit_registers->initiator_sas_address_lo);
836 
837 	/* This value gets cleared just in case it's not already cleared */
838 	writel(0, &iport->viit_registers->reserved);
839 
840 	/* We are required to update the status register last */
841 	writel(SCU_VIIT_ENTRY_ID_VIIT |
842 	       SCU_VIIT_IPPT_INITIATOR |
843 	       ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
844 	       SCU_VIIT_STATUS_ALL_VALID,
845 	       &iport->viit_registers->status);
846 }
847 
848 enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
849 {
850 	u16 index;
851 	struct isci_phy *iphy;
852 	enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
853 
854 	/*
855 	 * Loop through all of the phys in this port and find the phy with the
856 	 * lowest maximum link rate. */
857 	for (index = 0; index < SCI_MAX_PHYS; index++) {
858 		iphy = iport->phy_table[index];
859 		if (iphy && sci_port_active_phy(iport, iphy) &&
860 		    iphy->max_negotiated_speed < max_allowed_speed)
861 			max_allowed_speed = iphy->max_negotiated_speed;
862 	}
863 
864 	return max_allowed_speed;
865 }
866 
867 static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
868 {
869 	u32 pts_control_value;
870 
871 	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
872 	pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
873 	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
874 }
875 
876 /**
877  * sci_port_post_dummy_request() - post dummy/workaround request
878  * @iport: port to post task
879  *
880  * Prevent the hardware scheduler from posting new requests to the front
881  * of the scheduler queue causing a starvation problem for currently
882  * ongoing requests.
883  *
884  */
885 static void sci_port_post_dummy_request(struct isci_port *iport)
886 {
887 	struct isci_host *ihost = iport->owning_controller;
888 	u16 tag = iport->reserved_tag;
889 	struct scu_task_context *tc;
890 	u32 command;
891 
892 	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
893 	tc->abort = 0;
894 
895 	command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
896 		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
897 		  ISCI_TAG_TCI(tag);
898 
899 	sci_controller_post_request(ihost, command);
900 }
901 
902 /**
903  * sci_port_abort_dummy_request()
904  * This routine will abort the dummy request.  This will allow the hardware to
905  * power down parts of the silicon to save power.
906  *
907  * @iport: The port on which the task must be aborted.
908  *
909  */
910 static void sci_port_abort_dummy_request(struct isci_port *iport)
911 {
912 	struct isci_host *ihost = iport->owning_controller;
913 	u16 tag = iport->reserved_tag;
914 	struct scu_task_context *tc;
915 	u32 command;
916 
917 	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
918 	tc->abort = 1;
919 
920 	command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
921 		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
922 		  ISCI_TAG_TCI(tag);
923 
924 	sci_controller_post_request(ihost, command);
925 }
926 
927 /**
928  * sci_port_resume_port_task_scheduler()
929  * @iport: This is the struct isci_port object to resume.
930  *
931  * This method will resume the port task scheduler for this port object.
932  */
933 static void
934 sci_port_resume_port_task_scheduler(struct isci_port *iport)
935 {
936 	u32 pts_control_value;
937 
938 	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
939 	pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
940 	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
941 }
942 
943 static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
944 {
945 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
946 
947 	sci_port_suspend_port_task_scheduler(iport);
948 
949 	iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
950 
951 	if (iport->active_phy_mask != 0) {
952 		/* At least one of the phys on the port is ready */
953 		port_state_machine_change(iport,
954 					  SCI_PORT_SUB_OPERATIONAL);
955 	}
956 }
957 
958 static void scic_sds_port_ready_substate_waiting_exit(
959 					struct sci_base_state_machine *sm)
960 {
961 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
962 	sci_port_resume_port_task_scheduler(iport);
963 }
964 
965 static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
966 {
967 	u32 index;
968 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
969 	struct isci_host *ihost = iport->owning_controller;
970 
971 	dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
972 		__func__, iport->physical_port_index);
973 
974 	for (index = 0; index < SCI_MAX_PHYS; index++) {
975 		if (iport->phy_table[index]) {
976 			writel(iport->physical_port_index,
977 				&iport->port_pe_configuration_register[
978 					iport->phy_table[index]->phy_index]);
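			/* Resume any phy that is active but not yet enabled
			 * (i.e. the two masks differ for this phy index).
			 */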
979 			if (((iport->active_phy_mask ^ iport->enabled_phy_mask) & (1 << index)) != 0)
980 				sci_port_resume_phy(iport, iport->phy_table[index]);
981 		}
982 	}
983 
984 	sci_port_update_viit_entry(iport);
985 
986 	/*
987 	 * Post the dummy task for the port so the hardware can schedule
988 	 * io correctly
989 	 */
990 	sci_port_post_dummy_request(iport);
991 }
992 
993 static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
994 {
995 	struct isci_host *ihost = iport->owning_controller;
996 	u8 phys_index = iport->physical_port_index;
997 	union scu_remote_node_context *rnc;
998 	u16 rni = iport->reserved_rni;
999 	u32 command;
1000 
1001 	rnc = &ihost->remote_node_context_table[rni];
1002 
1003 	rnc->ssp.is_valid = false;
1004 
1005 	/* ensure the preceding tc abort request has reached the
1006 	 * controller and give it ample time to act before posting the rnc
1007 	 * invalidate
1008 	 */
1009 	readl(&ihost->smu_registers->interrupt_status); /* flush */
1010 	udelay(10);
1011 
1012 	command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
1013 		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
1014 
1015 	sci_controller_post_request(ihost, command);
1016 }
1017 
1018 /**
1019  * sci_port_ready_substate_operational_exit()
1020  * @sm: This is the object which is cast to a struct isci_port object.
1021  *
1022  * This method will perform the actions required by the struct isci_port on
1023  * exiting the SCI_PORT_SUB_OPERATIONAL state. This function reports
1024  * the port not ready and suspends the port task scheduler.
1025  */
1026 static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
1027 {
1028 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1029 	struct isci_host *ihost = iport->owning_controller;
1030 
1031 	/*
1032 	 * Kill the dummy task for this port if it has not yet posted;
1033 	 * the hardware will treat this as a NOP and just return abort
1034 	 * complete.
1035 	 */
1036 	sci_port_abort_dummy_request(iport);
1037 
1038 	dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
1039 		__func__, iport->physical_port_index);
1040 
1041 	if (iport->ready_exit)
1042 		sci_port_invalidate_dummy_remote_node(iport);
1043 }
1044 
1045 static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
1046 {
1047 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1048 	struct isci_host *ihost = iport->owning_controller;
1049 
1050 	if (iport->active_phy_mask == 0) {
1051 		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
1052 			__func__, iport->physical_port_index);
1053 
1054 		port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
1055 	} else
1056 		port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
1057 }
1058 
1059 enum sci_status sci_port_start(struct isci_port *iport)
1060 {
1061 	struct isci_host *ihost = iport->owning_controller;
1062 	enum sci_status status = SCI_SUCCESS;
1063 	enum sci_port_states state;
1064 	u32 phy_mask;
1065 
1066 	state = iport->sm.current_state_id;
1067 	if (state != SCI_PORT_STOPPED) {
1068 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1069 			 __func__, port_state_name(state));
1070 		return SCI_FAILURE_INVALID_STATE;
1071 	}
1072 
1073 	if (iport->assigned_device_count > 0) {
1074 		/* TODO This is a start failure operation because
1075 		 * there are still devices assigned to this port.
1076 		 * There must be no devices assigned to a port on a
1077 		 * start operation.
1078 		 */
1079 		return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
1080 	}
1081 
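	/* Reserve the dummy remote node context and task context used for
	 * the hardware scheduler workaround, if not already allocated.
	 */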
1082 	if (iport->reserved_rni == SCU_DUMMY_INDEX) {
1083 		u16 rni = sci_remote_node_table_allocate_remote_node(
1084 				&ihost->available_remote_nodes, 1);
1085 
1086 		if (rni != SCU_DUMMY_INDEX)
1087 			sci_port_construct_dummy_rnc(iport, rni);
1088 		else
1089 			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
1090 		iport->reserved_rni = rni;
1091 	}
1092 
1093 	if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
1094 		u16 tag;
1095 
1096 		tag = isci_alloc_tag(ihost);
1097 		if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
1098 			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
1099 		else
1100 			sci_port_construct_dummy_task(iport, tag);
1101 		iport->reserved_tag = tag;
1102 	}
1103 
1104 	if (status == SCI_SUCCESS) {
1105 		phy_mask = sci_port_get_phys(iport);
1106 
1107 		/*
1108 		 * There are one or more phys assigned to this port.  Make sure
1109 		 * the port's phy mask is in fact legal and supported by the
1110 		 * silicon.
1111 		 */
1112 		if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
1113 			port_state_machine_change(iport,
1114 						  SCI_PORT_READY);
1115 
1116 			return SCI_SUCCESS;
1117 		}
1118 		status = SCI_FAILURE;
1119 	}
1120 
1121 	if (status != SCI_SUCCESS)
1122 		sci_port_destroy_dummy_resources(iport);
1123 
1124 	return status;
1125 }
1126 
1127 enum sci_status sci_port_stop(struct isci_port *iport)
1128 {
1129 	enum sci_port_states state;
1130 
1131 	state = iport->sm.current_state_id;
1132 	switch (state) {
1133 	case SCI_PORT_STOPPED:
1134 		return SCI_SUCCESS;
1135 	case SCI_PORT_SUB_WAITING:
1136 	case SCI_PORT_SUB_OPERATIONAL:
1137 	case SCI_PORT_SUB_CONFIGURING:
1138 	case SCI_PORT_RESETTING:
1139 		port_state_machine_change(iport,
1140 					  SCI_PORT_STOPPING);
1141 		return SCI_SUCCESS;
1142 	default:
1143 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1144 			 __func__, port_state_name(state));
1145 		return SCI_FAILURE_INVALID_STATE;
1146 	}
1147 }
1148 
1149 static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
1150 {
1151 	enum sci_status status = SCI_FAILURE_INVALID_PHY;
1152 	struct isci_phy *iphy = NULL;
1153 	enum sci_port_states state;
1154 	u32 phy_index;
1155 
1156 	state = iport->sm.current_state_id;
1157 	if (state != SCI_PORT_SUB_OPERATIONAL) {
1158 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1159 			 __func__, port_state_name(state));
1160 		return SCI_FAILURE_INVALID_STATE;
1161 	}
1162 
1163 	/* Select a phy on which we can send the hard reset request. */
1164 	for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
1165 		iphy = iport->phy_table[phy_index];
1166 		if (iphy && !sci_port_active_phy(iport, iphy)) {
1167 			/*
1168 			 * We found a phy but it is not ready; select a
1169 			 * different phy
1170 			 */
1171 			iphy = NULL;
1172 		}
1173 	}
1174 
1175 	/* If we have a phy then go ahead and start the reset procedure */
1176 	if (!iphy)
1177 		return status;
1178 	status = sci_phy_reset(iphy);
1179 
1180 	if (status != SCI_SUCCESS)
1181 		return status;
1182 
1183 	sci_mod_timer(&iport->timer, timeout);
1184 	iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;
1185 
1186 	port_state_machine_change(iport, SCI_PORT_RESETTING);
1187 	return SCI_SUCCESS;
1188 }
1189 
1190 /**
1191  * sci_port_add_phy()
1192  * @iport: This parameter specifies the port in which the phy will be added.
1193  * @iphy: This parameter is the phy which is to be added to the port.
1194  *
1195  * This method will add a PHY to the selected port. This method returns an
1196  * enum sci_status. SCI_SUCCESS is returned if the phy has been added to the
1197  * port. Any other status is a failure to add the phy to the port.
1198  */
1199 enum sci_status sci_port_add_phy(struct isci_port *iport,
1200 				      struct isci_phy *iphy)
1201 {
1202 	enum sci_status status;
1203 	enum sci_port_states state;
1204 
1205 	sci_port_bcn_enable(iport);
1206 
1207 	state = iport->sm.current_state_id;
1208 	switch (state) {
1209 	case SCI_PORT_STOPPED: {
1210 		struct sci_sas_address port_sas_address;
1211 
1212 		/* Read the port assigned SAS Address if there is one */
1213 		sci_port_get_sas_address(iport, &port_sas_address);
1214 
1215 		if (port_sas_address.high != 0 && port_sas_address.low != 0) {
1216 			struct sci_sas_address phy_sas_address;
1217 
1218 			/* Make sure that the PHY SAS Address matches the SAS Address
1219 			 * for this port
1220 			 */
1221 			sci_phy_get_sas_address(iphy, &phy_sas_address);
1222 
1223 			if (port_sas_address.high != phy_sas_address.high ||
1224 			    port_sas_address.low  != phy_sas_address.low)
1225 				return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
1226 		}
1227 		return sci_port_set_phy(iport, iphy);
1228 	}
1229 	case SCI_PORT_SUB_WAITING:
1230 	case SCI_PORT_SUB_OPERATIONAL:
1231 		status = sci_port_set_phy(iport, iphy);
1232 
1233 		if (status != SCI_SUCCESS)
1234 			return status;
1235 
1236 		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
1237 		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
1238 		port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
1239 
1240 		return status;
1241 	case SCI_PORT_SUB_CONFIGURING:
1242 		status = sci_port_set_phy(iport, iphy);
1243 
1244 		if (status != SCI_SUCCESS)
1245 			return status;
1246 		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
1247 
1248 		/* Re-enter the configuring state since this may be the last phy in
1249 		 * the port.
1250 		 */
1251 		port_state_machine_change(iport,
1252 					  SCI_PORT_SUB_CONFIGURING);
1253 		return SCI_SUCCESS;
1254 	default:
1255 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1256 			 __func__, port_state_name(state));
1257 		return SCI_FAILURE_INVALID_STATE;
1258 	}
1259 }
1260 
1261 /**
1262  * sci_port_remove_phy()
1263  * @iport: This parameter specifies the port from which the phy will be removed.
1264  * @iphy: This parameter is the phy which is to be removed from the port.
1265  *
1266  * This method will remove the PHY from the selected PORT. This method returns
1267  * an enum sci_status. SCI_SUCCESS is returned if the phy has been removed
1268  * from the port. Any other status is a failure to remove the phy from the port.
1269  */
1270 enum sci_status sci_port_remove_phy(struct isci_port *iport,
1271 					 struct isci_phy *iphy)
1272 {
1273 	enum sci_status status;
1274 	enum sci_port_states state;
1275 
1276 	state = iport->sm.current_state_id;
1277 
1278 	switch (state) {
1279 	case SCI_PORT_STOPPED:
1280 		return sci_port_clear_phy(iport, iphy);
1281 	case SCI_PORT_SUB_OPERATIONAL:
1282 		status = sci_port_clear_phy(iport, iphy);
1283 		if (status != SCI_SUCCESS)
1284 			return status;
1285 
1286 		sci_port_deactivate_phy(iport, iphy, true);
1287 		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
1288 		port_state_machine_change(iport,
1289 					  SCI_PORT_SUB_CONFIGURING);
1290 		return SCI_SUCCESS;
1291 	case SCI_PORT_SUB_CONFIGURING:
1292 		status = sci_port_clear_phy(iport, iphy);
1293 
1294 		if (status != SCI_SUCCESS)
1295 			return status;
1296 		sci_port_deactivate_phy(iport, iphy, true);
1297 
1298 		/* Re-enter the configuring state since this may be the last phy in
1299 		 * the port
1300 		 */
1301 		port_state_machine_change(iport,
1302 					  SCI_PORT_SUB_CONFIGURING);
1303 		return SCI_SUCCESS;
1304 	default:
1305 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1306 			 __func__, port_state_name(state));
1307 		return SCI_FAILURE_INVALID_STATE;
1308 	}
1309 }
1310 
1311 enum sci_status sci_port_link_up(struct isci_port *iport,
1312 				      struct isci_phy *iphy)
1313 {
1314 	enum sci_port_states state;
1315 
1316 	state = iport->sm.current_state_id;
1317 	switch (state) {
1318 	case SCI_PORT_SUB_WAITING:
1319 		/* Since this is the first phy going link up for the port we
1320 		 * can just enable it and continue
1321 		 */
1322 		sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
1323 
1324 		port_state_machine_change(iport,
1325 					  SCI_PORT_SUB_OPERATIONAL);
1326 		return SCI_SUCCESS;
1327 	case SCI_PORT_SUB_OPERATIONAL:
1328 		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
1329 		return SCI_SUCCESS;
1330 	case SCI_PORT_RESETTING:
1331 		/* TODO We should make sure that the phy that has gone
1332 		 * link up is the same one on which we sent the reset.  It is
1333 		 * possible that the phy on which we sent the reset is not the
1334 		 * one that has gone link up and we want to make sure that the
1335 		 * phy being reset comes back.  Consider the case where a
1336 		 * reset is sent but before the hardware processes the reset it
1337 		 * gets a link up on the port because of a hot plug event.
1338 		 * Because of the reset request this phy will go link down
1339 		 * almost immediately.
1340 		 */
1341 
1342 		/* In the resetting state we don't notify the user regarding
1343 		 * link up and link down notifications.
1344 		 */
1345 		sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
1346 		return SCI_SUCCESS;
1347 	default:
1348 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1349 			 __func__, port_state_name(state));
1350 		return SCI_FAILURE_INVALID_STATE;
1351 	}
1352 }
1353 
1354 enum sci_status sci_port_link_down(struct isci_port *iport,
1355 					struct isci_phy *iphy)
1356 {
1357 	enum sci_port_states state;
1358 
1359 	state = iport->sm.current_state_id;
1360 	switch (state) {
1361 	case SCI_PORT_SUB_OPERATIONAL:
1362 		sci_port_deactivate_phy(iport, iphy, true);
1363 
1364 		/* If there are no active phys left in the port, then
1365 		 * transition the port to the WAITING state until such time
1366 		 * as a phy goes link up
1367 		 */
1368 		if (iport->active_phy_mask == 0)
1369 			port_state_machine_change(iport,
1370 						  SCI_PORT_SUB_WAITING);
1371 		return SCI_SUCCESS;
1372 	case SCI_PORT_RESETTING:
1373 		/* In the resetting state we don't notify the user regarding
1374 		 * link up and link down notifications. */
1375 		sci_port_deactivate_phy(iport, iphy, false);
1376 		return SCI_SUCCESS;
1377 	default:
1378 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1379 			 __func__, port_state_name(state));
1380 		return SCI_FAILURE_INVALID_STATE;
1381 	}
1382 }
1383 
1384 enum sci_status sci_port_start_io(struct isci_port *iport,
1385 				  struct isci_remote_device *idev,
1386 				  struct isci_request *ireq)
1387 {
1388 	enum sci_port_states state;
1389 
1390 	state = iport->sm.current_state_id;
1391 	switch (state) {
1392 	case SCI_PORT_SUB_WAITING:
1393 		return SCI_FAILURE_INVALID_STATE;
1394 	case SCI_PORT_SUB_OPERATIONAL:
1395 		iport->started_request_count++;
1396 		return SCI_SUCCESS;
1397 	default:
1398 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1399 			 __func__, port_state_name(state));
1400 		return SCI_FAILURE_INVALID_STATE;
1401 	}
1402 }
1403 
1404 enum sci_status sci_port_complete_io(struct isci_port *iport,
1405 				     struct isci_remote_device *idev,
1406 				     struct isci_request *ireq)
1407 {
1408 	enum sci_port_states state;
1409 
1410 	state = iport->sm.current_state_id;
1411 	switch (state) {
1412 	case SCI_PORT_STOPPED:
1413 		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1414 			 __func__, port_state_name(state));
1415 		return SCI_FAILURE_INVALID_STATE;
1416 	case SCI_PORT_STOPPING:
1417 		sci_port_decrement_request_count(iport);
1418 
1419 		if (iport->started_request_count == 0)
1420 			port_state_machine_change(iport,
1421 						  SCI_PORT_STOPPED);
1422 		break;
1423 	case SCI_PORT_READY:
1424 	case SCI_PORT_RESETTING:
1425 	case SCI_PORT_FAILED:
1426 	case SCI_PORT_SUB_WAITING:
1427 	case SCI_PORT_SUB_OPERATIONAL:
1428 		sci_port_decrement_request_count(iport);
1429 		break;
1430 	case SCI_PORT_SUB_CONFIGURING:
1431 		sci_port_decrement_request_count(iport);
1432 		if (iport->started_request_count == 0) {
1433 			port_state_machine_change(iport,
1434 						  SCI_PORT_SUB_OPERATIONAL);
1435 		}
1436 		break;
1437 	}
1438 	return SCI_SUCCESS;
1439 }
1440 
1441 static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
1442 {
1443 	u32 pts_control_value;
1444 
1445 	 /* enable the port task scheduler in a suspended state */
1446 	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
1447 	pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
1448 	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
1449 }
1450 
1451 static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
1452 {
1453 	u32 pts_control_value;
1454 
1455 	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
1456 	pts_control_value &=
1457 		~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
1458 	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
1459 }
1460 
1461 static void sci_port_post_dummy_remote_node(struct isci_port *iport)
1462 {
1463 	struct isci_host *ihost = iport->owning_controller;
1464 	u8 phys_index = iport->physical_port_index;
1465 	union scu_remote_node_context *rnc;
1466 	u16 rni = iport->reserved_rni;
1467 	u32 command;
1468 
1469 	rnc = &ihost->remote_node_context_table[rni];
1470 	rnc->ssp.is_valid = true;
1471 
1472 	command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
1473 		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
1474 
1475 	sci_controller_post_request(ihost, command);
1476 
1477 	/* ensure hardware has seen the post rnc command and give it
1478 	 * ample time to act before sending the suspend
1479 	 */
1480 	readl(&ihost->smu_registers->interrupt_status); /* flush */
1481 	udelay(10);
1482 
1483 	command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
1484 		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
1485 
1486 	sci_controller_post_request(ihost, command);
1487 }
1488 
1489 static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
1490 {
1491 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1492 
1493 	if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
1494 		/*
1495 		 * If we enter this state because of a request to stop
1496 		 * the port then we want to disable the hardware's port
1497 		 * task scheduler. */
1498 		sci_port_disable_port_task_scheduler(iport);
1499 	}
1500 }
1501 
1502 static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
1503 {
1504 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1505 
1506 	/* Enable and suspend the port task scheduler */
1507 	sci_port_enable_port_task_scheduler(iport);
1508 }
1509 
1510 static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
1511 {
1512 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1513 	struct isci_host *ihost = iport->owning_controller;
1514 	u32 prev_state;
1515 
1516 	prev_state = iport->sm.previous_state_id;
1517 	if (prev_state  == SCI_PORT_RESETTING)
1518 		isci_port_hard_reset_complete(iport, SCI_SUCCESS);
1519 	else
1520 		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
1521 			__func__, iport->physical_port_index);
1522 
1523 	/* Post and suspend the dummy remote node context for this port. */
1524 	sci_port_post_dummy_remote_node(iport);
1525 
1526 	/* Start the ready substate machine */
1527 	port_state_machine_change(iport,
1528 				  SCI_PORT_SUB_WAITING);
1529 }
1530 
1531 static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
1532 {
1533 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1534 
1535 	sci_del_timer(&iport->timer);
1536 }
1537 
1538 static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
1539 {
1540 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1541 
1542 	sci_del_timer(&iport->timer);
1543 
1544 	sci_port_destroy_dummy_resources(iport);
1545 }
1546 
1547 static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
1548 {
1549 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1550 
1551 	isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
1552 }
1553 
1554 void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout)
1555 {
1556 	int phy_index;
1557 	u32 phy_mask = iport->active_phy_mask;
1558 
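	/* Track enable requests with a simple user count: a non-zero timeout
	 * is always written through, but the timeout is only cleared once the
	 * last user disables hang detection.
	 */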
1559 	if (timeout)
1560 		++iport->hang_detect_users;
1561 	else if (iport->hang_detect_users > 1)
1562 		--iport->hang_detect_users;
1563 	else
1564 		iport->hang_detect_users = 0;
1565 
1566 	if (timeout || (iport->hang_detect_users == 0)) {
1567 		for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
1568 			if ((phy_mask >> phy_index) & 1) {
1569 				writel(timeout,
1570 				       &iport->phy_table[phy_index]
1571 					  ->link_layer_registers
1572 					  ->link_layer_hang_detection_timeout);
1573 			}
1574 		}
1575 	}
1576 }
1577 /* --------------------------------------------------------------------------- */
1578 
1579 static const struct sci_base_state sci_port_state_table[] = {
1580 	[SCI_PORT_STOPPED] = {
1581 		.enter_state = sci_port_stopped_state_enter,
1582 		.exit_state  = sci_port_stopped_state_exit
1583 	},
1584 	[SCI_PORT_STOPPING] = {
1585 		.exit_state  = sci_port_stopping_state_exit
1586 	},
1587 	[SCI_PORT_READY] = {
1588 		.enter_state = sci_port_ready_state_enter,
1589 	},
1590 	[SCI_PORT_SUB_WAITING] = {
1591 		.enter_state = sci_port_ready_substate_waiting_enter,
1592 		.exit_state  = scic_sds_port_ready_substate_waiting_exit,
1593 	},
1594 	[SCI_PORT_SUB_OPERATIONAL] = {
1595 		.enter_state = sci_port_ready_substate_operational_enter,
1596 		.exit_state  = sci_port_ready_substate_operational_exit
1597 	},
1598 	[SCI_PORT_SUB_CONFIGURING] = {
1599 		.enter_state = sci_port_ready_substate_configuring_enter
1600 	},
1601 	[SCI_PORT_RESETTING] = {
1602 		.exit_state  = sci_port_resetting_state_exit
1603 	},
1604 	[SCI_PORT_FAILED] = {
1605 		.enter_state = sci_port_failed_state_enter,
1606 	}
1607 };
1608 
1609 void sci_port_construct(struct isci_port *iport, u8 index,
1610 			     struct isci_host *ihost)
1611 {
1612 	sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
1613 
1614 	iport->logical_port_index  = SCIC_SDS_DUMMY_PORT;
1615 	iport->physical_port_index = index;
1616 	iport->active_phy_mask     = 0;
1617 	iport->enabled_phy_mask    = 0;
1618 	iport->last_active_phy     = 0;
1619 	iport->ready_exit	   = false;
1620 
1621 	iport->owning_controller = ihost;
1622 
1623 	iport->started_request_count = 0;
1624 	iport->assigned_device_count = 0;
1625 	iport->hang_detect_users = 0;
1626 
1627 	iport->reserved_rni = SCU_DUMMY_INDEX;
1628 	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
1629 
1630 	sci_init_timer(&iport->timer, port_timeout);
1631 
1632 	iport->port_task_scheduler_registers = NULL;
1633 
1634 	for (index = 0; index < SCI_MAX_PHYS; index++)
1635 		iport->phy_table[index] = NULL;
1636 }
1637 
1638 void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
1639 {
1640 	struct isci_host *ihost = iport->owning_controller;
1641 
1642 	/* notify the user. */
1643 	isci_port_bc_change_received(ihost, iport, iphy);
1644 }
1645 
1646 static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
1647 {
1648 	wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
1649 }
1650 
1651 int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
1652 				 struct isci_phy *iphy)
1653 {
1654 	unsigned long flags;
1655 	enum sci_status status;
1656 	int ret = TMF_RESP_FUNC_COMPLETE;
1657 
1658 	dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
1659 		__func__, iport);
1660 
1661 	spin_lock_irqsave(&ihost->scic_lock, flags);
1662 	set_bit(IPORT_RESET_PENDING, &iport->state);
1663 
1664 	#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
1665 	status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
1666 
1667 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1668 
1669 	if (status == SCI_SUCCESS) {
1670 		wait_port_reset(ihost, iport);
1671 
1672 		dev_dbg(&ihost->pdev->dev,
1673 			"%s: iport = %p; hard reset completion\n",
1674 			__func__, iport);
1675 
1676 		if (iport->hard_reset_status != SCI_SUCCESS) {
1677 			ret = TMF_RESP_FUNC_FAILED;
1678 
1679 			dev_err(&ihost->pdev->dev,
1680 				"%s: iport = %p; hard reset failed (0x%x)\n",
1681 				__func__, iport, iport->hard_reset_status);
1682 		}
1683 	} else {
1684 		clear_bit(IPORT_RESET_PENDING, &iport->state);
1685 		wake_up(&ihost->eventq);
1686 		ret = TMF_RESP_FUNC_FAILED;
1687 
1688 		dev_err(&ihost->pdev->dev,
1689 			"%s: iport = %p; sci_port_hard_reset call"
1690 			" failed 0x%x\n",
1691 			__func__, iport, status);
1692 
1693 	}
1694 	return ret;
1695 }
1696 
1697 int isci_ata_check_ready(struct domain_device *dev)
1698 {
1699 	struct isci_port *iport = dev->port->lldd_port;
1700 	struct isci_host *ihost = dev_to_ihost(dev);
1701 	struct isci_remote_device *idev;
1702 	unsigned long flags;
1703 	int rc = 0;
1704 
1705 	spin_lock_irqsave(&ihost->scic_lock, flags);
1706 	idev = isci_lookup_device(dev);
1707 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1708 
1709 	if (!idev)
1710 		goto out;
1711 
1712 	if (test_bit(IPORT_RESET_PENDING, &iport->state))
1713 		goto out;
1714 
1715 	rc = !!iport->active_phy_mask;
1716  out:
1717 	isci_put_device(idev);
1718 
1719 	return rc;
1720 }
1721 
1722 void isci_port_deformed(struct asd_sas_phy *phy)
1723 {
1724 	struct isci_host *ihost = phy->ha->lldd_ha;
1725 	struct isci_port *iport = phy->port->lldd_port;
1726 	unsigned long flags;
1727 	int i;
1728 
1729 	/* we got a port notification on a port that was subsequently
1730 	 * torn down and libsas is just now catching up
1731 	 */
1732 	if (!iport)
1733 		return;
1734 
1735 	spin_lock_irqsave(&ihost->scic_lock, flags);
1736 	for (i = 0; i < SCI_MAX_PHYS; i++) {
1737 		if (iport->active_phy_mask & 1 << i)
1738 			break;
1739 	}
1740 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1741 
1742 	if (i >= SCI_MAX_PHYS)
1743 		dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
1744 			__func__, (long) (iport - &ihost->ports[0]));
1745 }
1746 
1747 void isci_port_formed(struct asd_sas_phy *phy)
1748 {
1749 	struct isci_host *ihost = phy->ha->lldd_ha;
1750 	struct isci_phy *iphy = to_iphy(phy);
1751 	struct asd_sas_port *port = phy->port;
1752 	struct isci_port *iport = NULL;
1753 	unsigned long flags;
1754 	int i;
1755 
1756 	/* initial ports are formed as the driver is still initializing;
1757 	 * wait for that process to complete
1758 	 */
1759 	wait_for_start(ihost);
1760 
1761 	spin_lock_irqsave(&ihost->scic_lock, flags);
1762 	for (i = 0; i < SCI_MAX_PORTS; i++) {
1763 		iport = &ihost->ports[i];
1764 		if (iport->active_phy_mask & 1 << iphy->phy_index)
1765 			break;
1766 	}
1767 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1768 
1769 	if (i >= SCI_MAX_PORTS)
1770 		iport = NULL;
1771 
1772 	port->lldd_port = iport;
1773 }
1774