xref: /linux/drivers/scsi/hisi_sas/hisi_sas_main.c (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e)
1 /*
2  * Copyright (c) 2015 Linaro Ltd.
3  * Copyright (c) 2015 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  */
11 
12 #include "hisi_sas.h"
13 #define DRV_NAME "hisi_sas"
14 
15 #define DEV_IS_GONE(dev) \
16 	((!(dev)) || ((dev)->dev_type == SAS_PHY_UNUSED))
17 
18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19 				u8 *lun, struct hisi_sas_tmf_task *tmf);
20 static int
21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22 			     struct domain_device *device,
23 			     int abort_flag, int tag);
24 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
25 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
26 				void *funcdata);
27 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
28 				  struct domain_device *device);
29 static void hisi_sas_dev_gone(struct domain_device *device);
30 
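/*
 * Map the command in a host-to-device FIS to the SATA frame protocol
 * (FPDMA, PIO, DMA or non-data) that the command header must declare.
 * Commands not listed fall back on the data direction of the request.
 */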
31 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
32 {
33 	switch (fis->command) {
34 	case ATA_CMD_FPDMA_WRITE:
35 	case ATA_CMD_FPDMA_READ:
36 	case ATA_CMD_FPDMA_RECV:
37 	case ATA_CMD_FPDMA_SEND:
38 	case ATA_CMD_NCQ_NON_DATA:
39 		return HISI_SAS_SATA_PROTOCOL_FPDMA;
40 
41 	case ATA_CMD_DOWNLOAD_MICRO:
42 	case ATA_CMD_ID_ATA:
43 	case ATA_CMD_PMP_READ:
44 	case ATA_CMD_READ_LOG_EXT:
45 	case ATA_CMD_PIO_READ:
46 	case ATA_CMD_PIO_READ_EXT:
47 	case ATA_CMD_PMP_WRITE:
48 	case ATA_CMD_WRITE_LOG_EXT:
49 	case ATA_CMD_PIO_WRITE:
50 	case ATA_CMD_PIO_WRITE_EXT:
51 		return HISI_SAS_SATA_PROTOCOL_PIO;
52 
53 	case ATA_CMD_DSM:
54 	case ATA_CMD_DOWNLOAD_MICRO_DMA:
55 	case ATA_CMD_PMP_READ_DMA:
56 	case ATA_CMD_PMP_WRITE_DMA:
57 	case ATA_CMD_READ:
58 	case ATA_CMD_READ_EXT:
59 	case ATA_CMD_READ_LOG_DMA_EXT:
60 	case ATA_CMD_READ_STREAM_DMA_EXT:
61 	case ATA_CMD_TRUSTED_RCV_DMA:
62 	case ATA_CMD_TRUSTED_SND_DMA:
63 	case ATA_CMD_WRITE:
64 	case ATA_CMD_WRITE_EXT:
65 	case ATA_CMD_WRITE_FUA_EXT:
66 	case ATA_CMD_WRITE_QUEUED:
67 	case ATA_CMD_WRITE_LOG_DMA_EXT:
68 	case ATA_CMD_WRITE_STREAM_DMA_EXT:
69 	case ATA_CMD_ZAC_MGMT_IN:
70 		return HISI_SAS_SATA_PROTOCOL_DMA;
71 
72 	case ATA_CMD_CHK_POWER:
73 	case ATA_CMD_DEV_RESET:
74 	case ATA_CMD_EDD:
75 	case ATA_CMD_FLUSH:
76 	case ATA_CMD_FLUSH_EXT:
77 	case ATA_CMD_VERIFY:
78 	case ATA_CMD_VERIFY_EXT:
79 	case ATA_CMD_SET_FEATURES:
80 	case ATA_CMD_STANDBY:
81 	case ATA_CMD_STANDBYNOW1:
82 	case ATA_CMD_ZAC_MGMT_OUT:
83 		return HISI_SAS_SATA_PROTOCOL_NONDATA;
84 
85 	case ATA_CMD_SET_MAX:
86 		switch (fis->features) {
87 		case ATA_SET_MAX_PASSWD:
88 		case ATA_SET_MAX_LOCK:
89 			return HISI_SAS_SATA_PROTOCOL_PIO;
90 
91 		case ATA_SET_MAX_PASSWD_DMA:
92 		case ATA_SET_MAX_UNLOCK_DMA:
93 			return HISI_SAS_SATA_PROTOCOL_DMA;
94 
95 		default:
96 			return HISI_SAS_SATA_PROTOCOL_NONDATA;
97 		}
98 
99 	default:
100 	{
101 		if (direction == DMA_NONE)
102 			return HISI_SAS_SATA_PROTOCOL_NONDATA;
103 		return HISI_SAS_SATA_PROTOCOL_PIO;
104 	}
105 	}
106 }
107 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
108 
109 void hisi_sas_sata_done(struct sas_task *task,
110 			    struct hisi_sas_slot *slot)
111 {
112 	struct task_status_struct *ts = &task->task_status;
113 	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
114 	struct hisi_sas_status_buffer *status_buf =
115 			hisi_sas_status_buf_addr_mem(slot);
116 	u8 *iu = &status_buf->iu[0];
117 	struct dev_to_host_fis *d2h =  (struct dev_to_host_fis *)iu;
118 
119 	resp->frame_len = sizeof(struct dev_to_host_fis);
120 	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
121 
122 	ts->buf_valid_size = sizeof(*resp);
123 }
124 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
125 
126 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
127 {
128 	struct ata_queued_cmd *qc = task->uldd_task;
129 
130 	if (qc) {
131 		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
132 			qc->tf.command == ATA_CMD_FPDMA_READ) {
133 			*tag = qc->tag;
134 			return 1;
135 		}
136 	}
137 	return 0;
138 }
139 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
140 
141 /*
142  * This function assumes linkrate mask fits in 8 bits, which it
143  * does for all HW versions supported.
144  */
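/*
 * e.g. max = SAS_LINK_RATE_6_0_GBPS yields 0x15: one bit per supported
 * rate (1.5, 3.0 and 6.0 Gbit/s), at every second bit position.
 */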
145 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
146 {
147 	u16 rate = 0;
148 	int i;
149 
150 	max -= SAS_LINK_RATE_1_5_GBPS;
151 	for (i = 0; i <= max; i++)
152 		rate |= 1 << (i * 2);
153 	return rate;
154 }
155 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
156 
157 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
158 {
159 	return device->port->ha->lldd_ha;
160 }
161 
162 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
163 {
164 	return container_of(sas_port, struct hisi_sas_port, sas_port);
165 }
166 EXPORT_SYMBOL_GPL(to_hisi_sas_port);
167 
168 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
169 {
170 	int phy_no;
171 
172 	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
173 		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
174 }
175 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
176 
177 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
178 {
179 	void *bitmap = hisi_hba->slot_index_tags;
180 
181 	clear_bit(slot_idx, bitmap);
182 }
183 
184 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
185 {
186 	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
187 }
188 
189 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
190 {
191 	void *bitmap = hisi_hba->slot_index_tags;
192 
193 	set_bit(slot_idx, bitmap);
194 }
195 
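/*
 * Allocate a free slot index (IPTT), scanning round-robin from just
 * past the most recently allocated index and wrapping to 0, so that a
 * freed tag is not immediately handed out again.
 */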
196 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
197 {
198 	unsigned int index;
199 	void *bitmap = hisi_hba->slot_index_tags;
200 
201 	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
202 			hisi_hba->last_slot_index + 1);
203 	if (index >= hisi_hba->slot_index_count) {
204 		index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
205 					   0);
206 		if (index >= hisi_hba->slot_index_count)
207 			return -SAS_QUEUE_FULL;
208 	}
209 	hisi_sas_slot_index_set(hisi_hba, index);
210 	*slot_idx = index;
211 	hisi_hba->last_slot_index = index;
212 
213 	return 0;
214 }
215 
216 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
217 {
218 	int i;
219 
220 	for (i = 0; i < hisi_hba->slot_index_count; ++i)
221 		hisi_sas_slot_index_clear(hisi_hba, i);
222 }
223 
224 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
225 			     struct hisi_sas_slot *slot)
226 {
227 	struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
228 	unsigned long flags;
229 
230 	if (task) {
231 		struct device *dev = hisi_hba->dev;
232 
233 		if (!task->lldd_task)
234 			return;
235 
236 		task->lldd_task = NULL;
237 
238 		if (!sas_protocol_ata(task->task_proto))
239 			if (slot->n_elem)
240 				dma_unmap_sg(dev, task->scatter,
241 					     task->num_scatter,
242 					     task->data_dir);
243 	}
244 
246 	spin_lock_irqsave(&dq->lock, flags);
247 	list_del_init(&slot->entry);
248 	spin_unlock_irqrestore(&dq->lock, flags);
249 
250 	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
251 
252 	spin_lock_irqsave(&hisi_hba->lock, flags);
253 	hisi_sas_slot_index_free(hisi_hba, slot->idx);
254 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
255 }
256 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
257 
258 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
259 				  struct hisi_sas_slot *slot)
260 {
261 	hisi_hba->hw->prep_smp(hisi_hba, slot);
262 }
263 
264 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
265 				  struct hisi_sas_slot *slot)
266 {
267 	hisi_hba->hw->prep_ssp(hisi_hba, slot);
268 }
269 
270 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
271 				  struct hisi_sas_slot *slot)
272 {
273 	hisi_hba->hw->prep_stp(hisi_hba, slot);
274 }
275 
276 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
277 		struct hisi_sas_slot *slot,
278 		int device_id, int abort_flag, int tag_to_abort)
279 {
280 	hisi_hba->hw->prep_abort(hisi_hba, slot,
281 			device_id, abort_flag, tag_to_abort);
282 }
283 
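/*
 * Common send path: map the request for DMA, allocate a slot index,
 * claim an entry in the device's delivery queue, fill in the command
 * header for the protocol and mark the slot ready. The caller rings
 * the doorbell via start_delivery() once prep has succeeded.
 */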
284 static int hisi_sas_task_prep(struct sas_task *task,
285 			      struct hisi_sas_dq **dq_pointer,
286 			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
287 			      int *pass)
288 {
289 	struct domain_device *device = task->dev;
290 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
291 	struct hisi_sas_device *sas_dev = device->lldd_dev;
292 	struct hisi_sas_port *port;
293 	struct hisi_sas_slot *slot;
294 	struct hisi_sas_cmd_hdr	*cmd_hdr_base;
295 	struct asd_sas_port *sas_port = device->port;
296 	struct device *dev = hisi_hba->dev;
297 	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
298 	int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
299 	struct hisi_sas_dq *dq;
300 	unsigned long flags;
301 	int wr_q_index;
302 
303 	if (!sas_port) {
304 		struct task_status_struct *ts = &task->task_status;
305 
306 		ts->resp = SAS_TASK_UNDELIVERED;
307 		ts->stat = SAS_PHY_DOWN;
308 		/*
309 		 * libsas will use dev->port, so we should
310 		 * not call task_done() for SATA devices.
311 		 */
312 		if (device->dev_type != SAS_SATA_DEV)
313 			task->task_done(task);
314 		return -ECOMM;
315 	}
316 
317 	if (DEV_IS_GONE(sas_dev)) {
318 		if (sas_dev)
319 			dev_info(dev, "task prep: device %d not ready\n",
320 				 sas_dev->device_id);
321 		else
322 			dev_info(dev, "task prep: device %016llx not ready\n",
323 				 SAS_ADDR(device->sas_addr));
324 
325 		return -ECOMM;
326 	}
327 
328 	*dq_pointer = dq = sas_dev->dq;
329 
330 	port = to_hisi_sas_port(sas_port);
331 	if (port && !port->port_attached) {
332 		dev_info(dev, "task prep: %s port%d has no attached device\n",
333 			 (dev_is_sata(device)) ?
334 			 "SATA/STP" : "SAS",
335 			 device->port->id);
336 
337 		return -ECOMM;
338 	}
339 
340 	if (!sas_protocol_ata(task->task_proto)) {
341 		unsigned int req_len, resp_len;
342 
343 		if (task->num_scatter) {
344 			n_elem = dma_map_sg(dev, task->scatter,
345 					    task->num_scatter, task->data_dir);
346 			if (!n_elem) {
347 				rc = -ENOMEM;
348 				goto prep_out;
349 			}
350 		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
351 			n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
352 						1, DMA_TO_DEVICE);
353 			if (!n_elem_req) {
354 				rc = -ENOMEM;
355 				goto prep_out;
356 			}
357 			req_len = sg_dma_len(&task->smp_task.smp_req);
358 			if (req_len & 0x3) {
359 				rc = -EINVAL;
360 				goto err_out_dma_unmap;
361 			}
362 			n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
363 						 1, DMA_FROM_DEVICE);
364 			if (!n_elem_resp) {
365 				rc = -ENOMEM;
366 				goto err_out_dma_unmap;
367 			}
368 			resp_len = sg_dma_len(&task->smp_task.smp_resp);
369 			if (resp_len & 0x3) {
370 				rc = -EINVAL;
371 				goto err_out_dma_unmap;
372 			}
373 		}
374 	} else
375 		n_elem = task->num_scatter;
376 
377 	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
378 		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
379 			n_elem);
380 		rc = -EINVAL;
381 		goto err_out_dma_unmap;
382 	}
383 
384 	spin_lock_irqsave(&hisi_hba->lock, flags);
385 	if (hisi_hba->hw->slot_index_alloc)
386 		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
387 						    device);
388 	else
389 		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
390 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
391 	if (rc)
392 		goto err_out_dma_unmap;
393 
394 	slot = &hisi_hba->slot_info[slot_idx];
395 
396 	spin_lock_irqsave(&dq->lock, flags);
397 	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
398 	if (wr_q_index < 0) {
399 		spin_unlock_irqrestore(&dq->lock, flags);
400 		rc = -EAGAIN;
401 		goto err_out_tag;
402 	}
403 
404 	list_add_tail(&slot->delivery, &dq->list);
405 	list_add_tail(&slot->entry, &sas_dev->list);
406 	spin_unlock_irqrestore(&dq->lock, flags);
407 
408 	dlvry_queue = dq->id;
409 	dlvry_queue_slot = wr_q_index;
410 
411 	slot->n_elem = n_elem;
412 	slot->dlvry_queue = dlvry_queue;
413 	slot->dlvry_queue_slot = dlvry_queue_slot;
414 	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
415 	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
416 	slot->task = task;
417 	slot->port = port;
418 	slot->tmf = tmf;
419 	slot->is_internal = is_tmf;
420 	task->lldd_task = slot;
421 
422 	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
423 	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
424 	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
425 
426 	switch (task->task_proto) {
427 	case SAS_PROTOCOL_SMP:
428 		hisi_sas_task_prep_smp(hisi_hba, slot);
429 		break;
430 	case SAS_PROTOCOL_SSP:
431 		hisi_sas_task_prep_ssp(hisi_hba, slot);
432 		break;
433 	case SAS_PROTOCOL_SATA:
434 	case SAS_PROTOCOL_STP:
435 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
436 		hisi_sas_task_prep_ata(hisi_hba, slot);
437 		break;
438 	default:
439 		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
440 			task->task_proto);
441 		break;
442 	}
443 
444 	spin_lock_irqsave(&task->task_state_lock, flags);
445 	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
446 	spin_unlock_irqrestore(&task->task_state_lock, flags);
447 
448 	++(*pass);
449 	WRITE_ONCE(slot->ready, 1);
450 
451 	return 0;
452 
453 err_out_tag:
454 	spin_lock_irqsave(&hisi_hba->lock, flags);
455 	hisi_sas_slot_index_free(hisi_hba, slot_idx);
456 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
457 err_out_dma_unmap:
458 	if (!sas_protocol_ata(task->task_proto)) {
459 		if (task->num_scatter) {
460 			dma_unmap_sg(dev, task->scatter, task->num_scatter,
461 			     task->data_dir);
462 		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
463 			if (n_elem_req)
464 				dma_unmap_sg(dev, &task->smp_task.smp_req,
465 					     1, DMA_TO_DEVICE);
466 			if (n_elem_resp)
467 				dma_unmap_sg(dev, &task->smp_task.smp_resp,
468 					     1, DMA_FROM_DEVICE);
469 		}
470 	}
471 prep_out:
472 	dev_err(dev, "task prep: failed[%d]!\n", rc);
473 	return rc;
474 }
475 
476 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
477 			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
478 {
479 	int rc;
480 	u32 pass = 0;
481 	unsigned long flags;
482 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
483 	struct device *dev = hisi_hba->dev;
484 	struct hisi_sas_dq *dq = NULL;
485 
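	/*
	 * If commands are being rejected because a controller reset is in
	 * flight, block here until the reset path releases hisi_hba->sem.
	 * down() may sleep, so refuse the command in softirq context.
	 */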
486 	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
487 		if (in_softirq())
488 			return -EINVAL;
489 
490 		down(&hisi_hba->sem);
491 		up(&hisi_hba->sem);
492 	}
493 
494 	/* protect task_prep and start_delivery sequence */
495 	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
496 	if (rc)
497 		dev_err(dev, "task exec: failed[%d]!\n", rc);
498 
499 	if (likely(pass)) {
500 		spin_lock_irqsave(&dq->lock, flags);
501 		hisi_hba->hw->start_delivery(dq);
502 		spin_unlock_irqrestore(&dq->lock, flags);
503 	}
504 
505 	return rc;
506 }
507 
508 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
509 {
510 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
511 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
512 	struct sas_ha_struct *sas_ha;
513 
514 	if (!phy->phy_attached)
515 		return;
516 
517 	sas_ha = &hisi_hba->sha;
518 	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
519 
520 	if (sas_phy->phy) {
521 		struct sas_phy *sphy = sas_phy->phy;
522 
523 		sphy->negotiated_linkrate = sas_phy->linkrate;
524 		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
525 		sphy->maximum_linkrate_hw =
526 			hisi_hba->hw->phy_get_max_linkrate();
527 		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
528 			sphy->minimum_linkrate = phy->minimum_linkrate;
529 
530 		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
531 			sphy->maximum_linkrate = phy->maximum_linkrate;
532 	}
533 
534 	if (phy->phy_type & PORT_TYPE_SAS) {
535 		struct sas_identify_frame *id;
536 
537 		id = (struct sas_identify_frame *)phy->frame_rcvd;
538 		id->dev_type = phy->identify.device_type;
539 		id->initiator_bits = SAS_PROTOCOL_ALL;
540 		id->target_bits = phy->identify.target_port_protocols;
541 	} else if (phy->phy_type & PORT_TYPE_SATA) {
542 		/* Nothing to do */
543 	}
544 
545 	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
546 	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
547 }
548 
549 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
550 {
551 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
552 	struct hisi_sas_device *sas_dev = NULL;
553 	unsigned long flags;
554 	int last = hisi_hba->last_dev_id;
555 	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
556 	int i;
557 
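	/*
	 * Walk the device table circularly, starting just after the last
	 * allocated entry, and claim the first SAS_PHY_UNUSED slot. Each
	 * device is bound to a delivery queue by its device id.
	 */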
558 	spin_lock_irqsave(&hisi_hba->lock, flags);
559 	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
560 		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
561 			int queue = i % hisi_hba->queue_count;
562 			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
563 
564 			hisi_hba->devices[i].device_id = i;
565 			sas_dev = &hisi_hba->devices[i];
566 			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
567 			sas_dev->dev_type = device->dev_type;
568 			sas_dev->hisi_hba = hisi_hba;
569 			sas_dev->sas_device = device;
570 			sas_dev->dq = dq;
571 			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
572 			break;
573 		}
574 		i++;
575 	}
576 	hisi_hba->last_dev_id = i;
577 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
578 
579 	return sas_dev;
580 }
581 
582 #define HISI_SAS_SRST_ATA_DISK_CNT 3
583 static int hisi_sas_init_device(struct domain_device *device)
584 {
585 	int rc = TMF_RESP_FUNC_COMPLETE;
586 	struct scsi_lun lun;
587 	struct hisi_sas_tmf_task tmf_task;
588 	int retry = HISI_SAS_SRST_ATA_DISK_CNT;
589 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
590 
591 	switch (device->dev_type) {
592 	case SAS_END_DEVICE:
593 		int_to_scsilun(0, &lun);
594 
595 		tmf_task.tmf = TMF_CLEAR_TASK_SET;
596 		rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
597 						  &tmf_task);
598 		if (rc == TMF_RESP_FUNC_COMPLETE)
599 			hisi_sas_release_task(hisi_hba, device);
600 		break;
601 	case SAS_SATA_DEV:
602 	case SAS_SATA_PM:
603 	case SAS_SATA_PM_PORT:
604 	case SAS_SATA_PENDING:
605 		while (retry-- > 0) {
606 			rc = hisi_sas_softreset_ata_disk(device);
607 			if (!rc)
608 				break;
609 		}
610 		break;
611 	default:
612 		break;
613 	}
614 
615 	return rc;
616 }
617 
618 static int hisi_sas_dev_found(struct domain_device *device)
619 {
620 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
621 	struct domain_device *parent_dev = device->parent;
622 	struct hisi_sas_device *sas_dev;
623 	struct device *dev = hisi_hba->dev;
624 	int rc;
625 
626 	if (hisi_hba->hw->alloc_dev)
627 		sas_dev = hisi_hba->hw->alloc_dev(device);
628 	else
629 		sas_dev = hisi_sas_alloc_dev(device);
630 	if (!sas_dev) {
631 		dev_err(dev, "fail alloc dev: max support %d devices\n",
632 			HISI_SAS_MAX_DEVICES);
633 		return -EINVAL;
634 	}
635 
636 	device->lldd_dev = sas_dev;
637 	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
638 
639 	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
640 		int phy_no;
641 		u8 phy_num = parent_dev->ex_dev.num_phys;
642 		struct ex_phy *phy;
643 
644 		for (phy_no = 0; phy_no < phy_num; phy_no++) {
645 			phy = &parent_dev->ex_dev.ex_phy[phy_no];
646 			if (SAS_ADDR(phy->attached_sas_addr) ==
647 				SAS_ADDR(device->sas_addr))
648 				break;
649 		}
650 
651 		if (phy_no == phy_num) {
652 			dev_info(dev, "dev found: no attached "
653 				 "dev:%016llx at ex:%016llx\n",
654 				 SAS_ADDR(device->sas_addr),
655 				 SAS_ADDR(parent_dev->sas_addr));
656 			rc = -EINVAL;
657 			goto err_out;
658 		}
659 	}
660 
661 	dev_info(dev, "dev[%d:%x] found\n",
662 		sas_dev->device_id, sas_dev->dev_type);
663 
664 	rc = hisi_sas_init_device(device);
665 	if (rc)
666 		goto err_out;
667 	return 0;
668 
669 err_out:
670 	hisi_sas_dev_gone(device);
671 	return rc;
672 }
673 
674 int hisi_sas_slave_configure(struct scsi_device *sdev)
675 {
676 	struct domain_device *dev = sdev_to_domain_dev(sdev);
677 	int ret = sas_slave_configure(sdev);
678 
679 	if (ret)
680 		return ret;
681 	if (!dev_is_sata(dev))
682 		sas_change_queue_depth(sdev, 64);
683 
684 	return 0;
685 }
686 EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
687 
688 void hisi_sas_scan_start(struct Scsi_Host *shost)
689 {
690 	struct hisi_hba *hisi_hba = shost_priv(shost);
691 
692 	hisi_hba->hw->phys_init(hisi_hba);
693 }
694 EXPORT_SYMBOL_GPL(hisi_sas_scan_start);
695 
696 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
697 {
698 	struct hisi_hba *hisi_hba = shost_priv(shost);
699 	struct sas_ha_struct *sha = &hisi_hba->sha;
700 
701 	/* Wait for PHY up interrupt to occur */
702 	if (time < HZ)
703 		return 0;
704 
705 	sas_drain_work(sha);
706 	return 1;
707 }
708 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
709 
710 static void hisi_sas_phyup_work(struct work_struct *work)
711 {
712 	struct hisi_sas_phy *phy =
713 		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
714 	struct hisi_hba *hisi_hba = phy->hisi_hba;
715 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
716 	int phy_no = sas_phy->id;
717 
718 	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
719 	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
720 }
721 
722 static void hisi_sas_linkreset_work(struct work_struct *work)
723 {
724 	struct hisi_sas_phy *phy =
725 		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
726 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
727 
728 	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
729 }
730 
731 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
732 	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
733 	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
734 };
735 
736 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
737 				enum hisi_sas_phy_event event)
738 {
739 	struct hisi_hba *hisi_hba = phy->hisi_hba;
740 
741 	if (WARN_ON(event >= HISI_PHYES_NUM))
742 		return false;
743 
744 	return queue_work(hisi_hba->wq, &phy->works[event]);
745 }
746 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
747 
748 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
749 {
750 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
751 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
752 	int i;
753 
754 	phy->hisi_hba = hisi_hba;
755 	phy->port = NULL;
756 	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
757 	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
758 	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
759 	sas_phy->class = SAS;
760 	sas_phy->iproto = SAS_PROTOCOL_ALL;
761 	sas_phy->tproto = 0;
762 	sas_phy->type = PHY_TYPE_PHYSICAL;
763 	sas_phy->role = PHY_ROLE_INITIATOR;
764 	sas_phy->oob_mode = OOB_NOT_CONNECTED;
765 	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
766 	sas_phy->id = phy_no;
767 	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
768 	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
769 	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
770 	sas_phy->lldd_phy = phy;
771 
772 	for (i = 0; i < HISI_PHYES_NUM; i++)
773 		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
774 
775 	spin_lock_init(&phy->lock);
776 }
777 
778 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
779 {
780 	struct sas_ha_struct *sas_ha = sas_phy->ha;
781 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
782 	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
783 	struct asd_sas_port *sas_port = sas_phy->port;
784 	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
785 	unsigned long flags;
786 
787 	if (!sas_port)
788 		return;
789 
790 	spin_lock_irqsave(&hisi_hba->lock, flags);
791 	port->port_attached = 1;
792 	port->id = phy->port_id;
793 	phy->port = port;
794 	sas_port->lldd_port = port;
795 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
796 }
797 
798 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
799 				     struct hisi_sas_slot *slot)
800 {
801 	if (task) {
802 		unsigned long flags;
803 		struct task_status_struct *ts;
804 
805 		ts = &task->task_status;
806 
807 		ts->resp = SAS_TASK_COMPLETE;
808 		ts->stat = SAS_ABORTED_TASK;
809 		spin_lock_irqsave(&task->task_state_lock, flags);
810 		task->task_state_flags &=
811 			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
812 		task->task_state_flags |= SAS_TASK_STATE_DONE;
813 		spin_unlock_irqrestore(&task->task_state_lock, flags);
814 	}
815 
816 	hisi_sas_slot_task_free(hisi_hba, task, slot);
817 }
818 
819 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
820 			struct domain_device *device)
821 {
822 	struct hisi_sas_slot *slot, *slot2;
823 	struct hisi_sas_device *sas_dev = device->lldd_dev;
824 
825 	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
826 		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
827 }
828 
829 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
830 {
831 	struct hisi_sas_device *sas_dev;
832 	struct domain_device *device;
833 	int i;
834 
835 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
836 		sas_dev = &hisi_hba->devices[i];
837 		device = sas_dev->sas_device;
838 
839 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
840 		    !device)
841 			continue;
842 
843 		hisi_sas_release_task(hisi_hba, device);
844 	}
845 }
846 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
847 
848 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
849 				struct domain_device *device)
850 {
851 	if (hisi_hba->hw->dereg_device)
852 		hisi_hba->hw->dereg_device(hisi_hba, device);
853 }
854 
855 static void hisi_sas_dev_gone(struct domain_device *device)
856 {
857 	struct hisi_sas_device *sas_dev = device->lldd_dev;
858 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
859 	struct device *dev = hisi_hba->dev;
860 
861 	dev_info(dev, "dev[%d:%x] is gone\n",
862 		 sas_dev->device_id, sas_dev->dev_type);
863 
864 	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
865 		hisi_sas_internal_task_abort(hisi_hba, device,
866 				     HISI_SAS_INT_ABT_DEV, 0);
867 
868 		hisi_sas_dereg_device(hisi_hba, device);
869 
870 		down(&hisi_hba->sem);
871 		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
872 		up(&hisi_hba->sem);
873 		device->lldd_dev = NULL;
874 	}
875 
876 	if (hisi_hba->hw->free_device)
877 		hisi_hba->hw->free_device(sas_dev);
878 	sas_dev->dev_type = SAS_PHY_UNUSED;
879 }
880 
881 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
882 {
883 	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
884 }
885 
886 static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
887 			struct sas_phy_linkrates *r)
888 {
889 	struct sas_phy_linkrates _r;
890 
891 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
892 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
893 	enum sas_linkrate min, max;
894 
895 	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
896 		max = sas_phy->phy->maximum_linkrate;
897 		min = r->minimum_linkrate;
898 	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
899 		max = r->maximum_linkrate;
900 		min = sas_phy->phy->minimum_linkrate;
901 	} else
902 		return;
903 
904 	_r.maximum_linkrate = max;
905 	_r.minimum_linkrate = min;
906 
907 	hisi_hba->hw->phy_disable(hisi_hba, phy_no);
908 	msleep(100);
909 	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
910 	hisi_hba->hw->phy_start(hisi_hba, phy_no);
911 }
912 
913 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
914 				void *funcdata)
915 {
916 	struct sas_ha_struct *sas_ha = sas_phy->ha;
917 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
918 	int phy_no = sas_phy->id;
919 
920 	switch (func) {
921 	case PHY_FUNC_HARD_RESET:
922 		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
923 		break;
924 
925 	case PHY_FUNC_LINK_RESET:
926 		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
927 		msleep(100);
928 		hisi_hba->hw->phy_start(hisi_hba, phy_no);
929 		break;
930 
931 	case PHY_FUNC_DISABLE:
932 		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
933 		break;
934 
935 	case PHY_FUNC_SET_LINK_RATE:
936 		hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
937 		break;
938 	case PHY_FUNC_GET_EVENTS:
939 		if (hisi_hba->hw->get_events) {
940 			hisi_hba->hw->get_events(hisi_hba, phy_no);
941 			break;
942 		}
943 		/* fallthru */
944 	case PHY_FUNC_RELEASE_SPINUP_HOLD:
945 	default:
946 		return -EOPNOTSUPP;
947 	}
948 	return 0;
949 }
950 
951 static void hisi_sas_task_done(struct sas_task *task)
952 {
953 	if (!del_timer(&task->slow_task->timer))
954 		return;
955 	complete(&task->slow_task->completion);
956 }
957 
958 static void hisi_sas_tmf_timedout(struct timer_list *t)
959 {
960 	struct sas_task_slow *slow = from_timer(slow, t, timer);
961 	struct sas_task *task = slow->task;
962 	unsigned long flags;
963 
964 	spin_lock_irqsave(&task->task_state_lock, flags);
965 	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
966 		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
967 	spin_unlock_irqrestore(&task->task_state_lock, flags);
968 
969 	complete(&task->slow_task->completion);
970 }
971 
972 #define TASK_TIMEOUT 20
973 #define TASK_RETRY 3
974 #define INTERNAL_ABORT_TIMEOUT 6
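/*
 * Issue a TMF (or a SATA soft-reset FIS) as a libsas slow task and wait
 * for completion, retrying up to TASK_RETRY times. A timer marks the
 * task aborted if no response arrives within TASK_TIMEOUT seconds.
 */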
975 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
976 					   void *parameter, u32 para_len,
977 					   struct hisi_sas_tmf_task *tmf)
978 {
979 	struct hisi_sas_device *sas_dev = device->lldd_dev;
980 	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
981 	struct device *dev = hisi_hba->dev;
982 	struct sas_task *task;
983 	int res, retry;
984 
985 	for (retry = 0; retry < TASK_RETRY; retry++) {
986 		task = sas_alloc_slow_task(GFP_KERNEL);
987 		if (!task)
988 			return -ENOMEM;
989 
990 		task->dev = device;
991 		task->task_proto = device->tproto;
992 
993 		if (dev_is_sata(device)) {
994 			task->ata_task.device_control_reg_update = 1;
995 			memcpy(&task->ata_task.fis, parameter, para_len);
996 		} else {
997 			memcpy(&task->ssp_task, parameter, para_len);
998 		}
999 		task->task_done = hisi_sas_task_done;
1000 
1001 		task->slow_task->timer.function = hisi_sas_tmf_timedout;
1002 		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
1003 		add_timer(&task->slow_task->timer);
1004 
1005 		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
1006 
1007 		if (res) {
1008 			del_timer(&task->slow_task->timer);
1009 			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
1010 				res);
1011 			goto ex_err;
1012 		}
1013 
1014 		wait_for_completion(&task->slow_task->completion);
1015 		res = TMF_RESP_FUNC_FAILED;
1016 		/* Even if the TMF timed out, return directly. */
1017 		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1018 			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1019 				struct hisi_sas_slot *slot = task->lldd_task;
1020 
1021 				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
1022 				if (slot)
1023 					slot->task = NULL;
1024 
1025 				goto ex_err;
1026 			} else
1027 				dev_err(dev, "abort tmf: TMF task timeout\n");
1028 		}
1029 
1030 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
1031 		     task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1032 			res = TMF_RESP_FUNC_COMPLETE;
1033 			break;
1034 		}
1035 
1036 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
1037 			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1038 			res = TMF_RESP_FUNC_SUCC;
1039 			break;
1040 		}
1041 
1042 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
1043 		      task->task_status.stat == SAS_DATA_UNDERRUN) {
1044 			/* no error, but return the number of bytes of
1045 			 * underrun
1046 			 */
1047 			dev_warn(dev, "abort tmf: task to dev %016llx "
1048 				 "resp: 0x%x sts 0x%x underrun\n",
1049 				 SAS_ADDR(device->sas_addr),
1050 				 task->task_status.resp,
1051 				 task->task_status.stat);
1052 			res = task->task_status.residual;
1053 			break;
1054 		}
1055 
1056 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
1057 			task->task_status.stat == SAS_DATA_OVERRUN) {
1058 			dev_warn(dev, "abort tmf: blocked task error\n");
1059 			res = -EMSGSIZE;
1060 			break;
1061 		}
1062 
1063 		dev_warn(dev, "abort tmf: task to dev "
1064 			 "%016llx resp: 0x%x status 0x%x\n",
1065 			 SAS_ADDR(device->sas_addr), task->task_status.resp,
1066 			 task->task_status.stat);
1067 		sas_free_task(task);
1068 		task = NULL;
1069 	}
1070 ex_err:
1071 	if (retry == TASK_RETRY)
1072 		dev_warn(dev, "abort tmf: executing internal task failed!\n");
1073 	sas_free_task(task);
1074 	return res;
1075 }
1076 
1077 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
1078 		bool reset, int pmp, u8 *fis)
1079 {
1080 	struct ata_taskfile tf;
1081 
1082 	ata_tf_init(dev, &tf);
1083 	if (reset)
1084 		tf.ctl |= ATA_SRST;
1085 	else
1086 		tf.ctl &= ~ATA_SRST;
1087 	tf.command = ATA_CMD_DEV_RESET;
1088 	ata_tf_to_fis(&tf, pmp, 0, fis);
1089 }
1090 
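/*
 * Soft-reset a SATA disk: send an SRST-set device control FIS on each
 * link and, if that succeeds, an SRST-clear FIS to complete the reset
 * handshake, then release any tasks still held for the device.
 */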
1091 static int hisi_sas_softreset_ata_disk(struct domain_device *device)
1092 {
1093 	u8 fis[20] = {0};
1094 	struct ata_port *ap = device->sata_dev.ap;
1095 	struct ata_link *link;
1096 	int rc = TMF_RESP_FUNC_FAILED;
1097 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1098 	struct device *dev = hisi_hba->dev;
1099 	int s = sizeof(struct host_to_dev_fis);
1100 
1101 	ata_for_each_link(link, ap, EDGE) {
1102 		int pmp = sata_srst_pmp(link);
1103 
1104 		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1105 		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
1106 		if (rc != TMF_RESP_FUNC_COMPLETE)
1107 			break;
1108 	}
1109 
1110 	if (rc == TMF_RESP_FUNC_COMPLETE) {
1111 		ata_for_each_link(link, ap, EDGE) {
1112 			int pmp = sata_srst_pmp(link);
1113 
1114 			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
1115 			rc = hisi_sas_exec_internal_tmf_task(device, fis,
1116 							     s, NULL);
1117 			if (rc != TMF_RESP_FUNC_COMPLETE)
1118 				dev_err(dev, "ata disk de-reset failed\n");
1119 		}
1120 	} else {
1121 		dev_err(dev, "ata disk reset failed\n");
1122 	}
1123 
1124 	if (rc == TMF_RESP_FUNC_COMPLETE)
1125 		hisi_sas_release_task(hisi_hba, device);
1126 
1127 	return rc;
1128 }
1129 
1130 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
1131 				u8 *lun, struct hisi_sas_tmf_task *tmf)
1132 {
1133 	struct sas_ssp_task ssp_task;
1134 
1135 	if (!(device->tproto & SAS_PROTOCOL_SSP))
1136 		return TMF_RESP_FUNC_ESUPP;
1137 
1138 	memcpy(ssp_task.LUN, lun, 8);
1139 
1140 	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
1141 				sizeof(ssp_task), tmf);
1142 }
1143 
1144 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
1145 {
1146 	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
1147 	int i;
1148 
1149 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1150 		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1151 		struct domain_device *device = sas_dev->sas_device;
1152 		struct asd_sas_port *sas_port;
1153 		struct hisi_sas_port *port;
1154 		struct hisi_sas_phy *phy = NULL;
1155 		struct asd_sas_phy *sas_phy;
1156 
1157 		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
1158 				|| !device || !device->port)
1159 			continue;
1160 
1161 		sas_port = device->port;
1162 		port = to_hisi_sas_port(sas_port);
1163 
1164 		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
1165 			if (state & BIT(sas_phy->id)) {
1166 				phy = sas_phy->lldd_phy;
1167 				break;
1168 			}
1169 
1170 		if (phy) {
1171 			port->id = phy->port_id;
1172 
1173 			/* Update linkrate of directly attached device. */
1174 			if (!device->parent)
1175 				device->linkrate = phy->sas_phy.linkrate;
1176 
1177 			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
1178 		} else
1179 			port->id = 0xff;
1180 	}
1181 }
1182 
1183 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1184 			      u32 state)
1185 {
1186 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1187 	struct asd_sas_port *_sas_port = NULL;
1188 	int phy_no;
1189 
1190 	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1191 		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1192 		struct asd_sas_phy *sas_phy = &phy->sas_phy;
1193 		struct asd_sas_port *sas_port = sas_phy->port;
1194 		bool do_port_check = !!(_sas_port != sas_port);
1195 
1196 		if (!sas_phy->phy->enabled)
1197 			continue;
1198 
1199 		/* Report PHY state change to libsas */
1200 		if (state & BIT(phy_no)) {
1201 			if (do_port_check && sas_port && sas_port->port_dev) {
1202 				struct domain_device *dev = sas_port->port_dev;
1203 
1204 				_sas_port = sas_port;
1205 
1206 				if (DEV_IS_EXPANDER(dev->dev_type))
1207 					sas_ha->notify_port_event(sas_phy,
1208 							PORTE_BROADCAST_RCVD);
1209 			}
1210 		} else if (old_state & (1 << phy_no))
1211 			/* PHY down but was up before */
1212 			hisi_sas_phy_down(hisi_hba, phy_no, 0);
1213 
1214 	}
1215 }
1216 
1217 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
1218 {
1219 	struct hisi_sas_device *sas_dev;
1220 	struct domain_device *device;
1221 	int i;
1222 
1223 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1224 		sas_dev = &hisi_hba->devices[i];
1225 		device = sas_dev->sas_device;
1226 
1227 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1228 			continue;
1229 
1230 		hisi_sas_init_device(device);
1231 	}
1232 }
1233 
1234 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
1235 					     struct asd_sas_port *sas_port,
1236 					     struct domain_device *device)
1237 {
1238 	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
1239 	struct ata_port *ap = device->sata_dev.ap;
1240 	struct device *dev = hisi_hba->dev;
1241 	int s = sizeof(struct host_to_dev_fis);
1242 	int rc = TMF_RESP_FUNC_FAILED;
1243 	struct asd_sas_phy *sas_phy;
1244 	struct ata_link *link;
1245 	u8 fis[20] = {0};
1246 	u32 state;
1247 
1248 	state = hisi_hba->hw->get_phys_state(hisi_hba);
1249 	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
1250 		if (!(state & BIT(sas_phy->id)))
1251 			continue;
1252 
1253 		ata_for_each_link(link, ap, EDGE) {
1254 			int pmp = sata_srst_pmp(link);
1255 
1256 			tmf_task.phy_id = sas_phy->id;
1257 			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1258 			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
1259 							     &tmf_task);
1260 			if (rc != TMF_RESP_FUNC_COMPLETE) {
1261 				dev_err(dev, "phy%d ata reset failed rc=%d\n",
1262 					sas_phy->id, rc);
1263 				break;
1264 			}
1265 		}
1266 	}
1267 }
1268 
1269 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
1270 {
1271 	struct device *dev = hisi_hba->dev;
1272 	int port_no, rc, i;
1273 
1274 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1275 		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1276 		struct domain_device *device = sas_dev->sas_device;
1277 
1278 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1279 			continue;
1280 
1281 		rc = hisi_sas_internal_task_abort(hisi_hba, device,
1282 						  HISI_SAS_INT_ABT_DEV, 0);
1283 		if (rc < 0)
1284 			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
1285 	}
1286 
1287 	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
1288 		struct hisi_sas_port *port = &hisi_hba->port[port_no];
1289 		struct asd_sas_port *sas_port = &port->sas_port;
1290 		struct domain_device *port_dev = sas_port->port_dev;
1291 		struct domain_device *device;
1292 
1293 		if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
1294 			continue;
1295 
1296 		/* Try to find a SATA device */
1297 		list_for_each_entry(device, &sas_port->dev_list,
1298 				    dev_list_node) {
1299 			if (dev_is_sata(device)) {
1300 				hisi_sas_send_ata_reset_each_phy(hisi_hba,
1301 								 sas_port,
1302 								 device);
1303 				break;
1304 			}
1305 		}
1306 	}
1307 }
1308 
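/*
 * Quiesce the host ahead of a controller reset: snapshot the PHY state,
 * block new requests at the SCSI layer, give outstanding commands a
 * bounded time to drain, and set REJECT_CMD so the send path refuses
 * anything that still arrives. hisi_hba->sem remains held until
 * hisi_sas_controller_reset_done().
 */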
1309 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
1310 {
1311 	struct Scsi_Host *shost = hisi_hba->shost;
1312 
1313 	down(&hisi_hba->sem);
1314 	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
1315 
1316 	scsi_block_requests(shost);
1317 	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
1318 
1319 	if (timer_pending(&hisi_hba->timer))
1320 		del_timer_sync(&hisi_hba->timer);
1321 
1322 	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1323 }
1324 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
1325 
1326 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
1327 {
1328 	struct Scsi_Host *shost = hisi_hba->shost;
1329 	u32 state;
1330 
1331 	/* Init and wait for PHYs to come up and all libsas events to finish. */
1332 	hisi_hba->hw->phys_init(hisi_hba);
1333 	msleep(1000);
1334 	hisi_sas_refresh_port_id(hisi_hba);
1335 	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1336 	up(&hisi_hba->sem);
1337 
1338 	if (hisi_hba->reject_stp_links_msk)
1339 		hisi_sas_terminate_stp_reject(hisi_hba);
1340 	hisi_sas_reset_init_all_devices(hisi_hba);
1341 	scsi_unblock_requests(shost);
1342 	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
1343 
1344 	state = hisi_hba->hw->get_phys_state(hisi_hba);
1345 	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
1346 }
1347 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
1348 
1349 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1350 {
1351 	struct device *dev = hisi_hba->dev;
1352 	struct Scsi_Host *shost = hisi_hba->shost;
1353 	int rc;
1354 
1355 	if (!hisi_hba->hw->soft_reset)
1356 		return -1;
1357 
1358 	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
1359 		return -1;
1360 
1361 	dev_info(dev, "controller resetting...\n");
1362 	hisi_sas_controller_reset_prepare(hisi_hba);
1363 
1364 	rc = hisi_hba->hw->soft_reset(hisi_hba);
1365 	if (rc) {
1366 		dev_warn(dev, "controller reset failed (%d)\n", rc);
1367 		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1368 		up(&hisi_hba->sem);
1369 		scsi_unblock_requests(shost);
1370 		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
1371 		return rc;
1372 	}
1373 
1374 	hisi_sas_controller_reset_done(hisi_hba);
1375 	dev_info(dev, "controller reset complete\n");
1376 
1377 	return 0;
1378 }
1379 
1380 static int hisi_sas_abort_task(struct sas_task *task)
1381 {
1382 	struct scsi_lun lun;
1383 	struct hisi_sas_tmf_task tmf_task;
1384 	struct domain_device *device = task->dev;
1385 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1386 	struct hisi_hba *hisi_hba;
1387 	struct device *dev;
1388 	int rc = TMF_RESP_FUNC_FAILED;
1389 	unsigned long flags;
1390 
1391 	if (!sas_dev)
1392 		return TMF_RESP_FUNC_FAILED;
1393 
1394 	hisi_hba = dev_to_hisi_hba(task->dev);
1395 	dev = hisi_hba->dev;
1396 
1397 	spin_lock_irqsave(&task->task_state_lock, flags);
1398 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1399 		spin_unlock_irqrestore(&task->task_state_lock, flags);
1400 		rc = TMF_RESP_FUNC_COMPLETE;
1401 		goto out;
1402 	}
1403 	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1404 	spin_unlock_irqrestore(&task->task_state_lock, flags);
1405 
1406 	sas_dev->dev_status = HISI_SAS_DEV_EH;
1407 	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1408 		struct scsi_cmnd *cmnd = task->uldd_task;
1409 		struct hisi_sas_slot *slot = task->lldd_task;
1410 		u32 tag = slot->idx;
1411 		int rc2;
1412 
1413 		int_to_scsilun(cmnd->device->lun, &lun);
1414 		tmf_task.tmf = TMF_ABORT_TASK;
1415 		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1416 
1417 		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
1418 						  &tmf_task);
1419 
1420 		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
1421 						   HISI_SAS_INT_ABT_CMD, tag);
1422 		if (rc2 < 0) {
1423 			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
1424 			return TMF_RESP_FUNC_FAILED;
1425 		}
1426 
1427 		/*
1428 		 * If the TMF finds that the IO is not in the device and also
1429 		 * the internal abort does not succeed, then it is safe to
1430 		 * free the slot.
1431 		 * Note: if the internal abort succeeds then the slot
1432 		 * will have already been completed
1433 		 */
1434 		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
1435 			if (task->lldd_task)
1436 				hisi_sas_do_release_task(hisi_hba, task, slot);
1437 		}
1438 	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
1439 		task->task_proto & SAS_PROTOCOL_STP) {
1440 		if (task->dev->dev_type == SAS_SATA_DEV) {
1441 			rc = hisi_sas_internal_task_abort(hisi_hba, device,
1442 						HISI_SAS_INT_ABT_DEV, 0);
1443 			if (rc < 0) {
1444 				dev_err(dev, "abort task: internal abort failed\n");
1445 				goto out;
1446 			}
1447 			hisi_sas_dereg_device(hisi_hba, device);
1448 			rc = hisi_sas_softreset_ata_disk(device);
1449 		}
1450 	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
1451 		/* SMP */
1452 		struct hisi_sas_slot *slot = task->lldd_task;
1453 		u32 tag = slot->idx;
1454 
1455 		rc = hisi_sas_internal_task_abort(hisi_hba, device,
1456 			     HISI_SAS_INT_ABT_CMD, tag);
1457 		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
1458 					task->lldd_task)
1459 			hisi_sas_do_release_task(hisi_hba, task, slot);
1460 	}
1461 
1462 out:
1463 	if (rc != TMF_RESP_FUNC_COMPLETE)
1464 		dev_notice(dev, "abort task: rc=%d\n", rc);
1465 	return rc;
1466 }
1467 
1468 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1469 {
1470 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1471 	struct device *dev = hisi_hba->dev;
1472 	struct hisi_sas_tmf_task tmf_task;
1473 	int rc = TMF_RESP_FUNC_FAILED;
1474 
1475 	rc = hisi_sas_internal_task_abort(hisi_hba, device,
1476 					HISI_SAS_INT_ABT_DEV, 0);
1477 	if (rc < 0) {
1478 		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1479 		return TMF_RESP_FUNC_FAILED;
1480 	}
1481 	hisi_sas_dereg_device(hisi_hba, device);
1482 
1483 	tmf_task.tmf = TMF_ABORT_TASK_SET;
1484 	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1485 
1486 	if (rc == TMF_RESP_FUNC_COMPLETE)
1487 		hisi_sas_release_task(hisi_hba, device);
1488 
1489 	return rc;
1490 }
1491 
1492 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1493 {
1494 	int rc = TMF_RESP_FUNC_FAILED;
1495 	struct hisi_sas_tmf_task tmf_task;
1496 
1497 	tmf_task.tmf = TMF_CLEAR_ACA;
1498 	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1499 
1500 	return rc;
1501 }
1502 
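/*
 * Reset the PHY this device is attached through: a hard reset for SAS
 * devices, a link reset for SATA/STP. For a local PHY, wait for the
 * PHY-up completion; otherwise simply allow time for the remote PHY to
 * come back up.
 */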
1503 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1504 {
1505 	struct sas_phy *local_phy = sas_get_local_phy(device);
1506 	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1507 			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1508 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1509 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1510 	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
1511 	struct hisi_sas_phy *phy = container_of(sas_phy,
1512 			struct hisi_sas_phy, sas_phy);
1513 	DECLARE_COMPLETION_ONSTACK(phyreset);
1514 
1515 	if (scsi_is_sas_phy_local(local_phy)) {
1516 		phy->in_reset = 1;
1517 		phy->reset_completion = &phyreset;
1518 	}
1519 
1520 	rc = sas_phy_reset(local_phy, reset_type);
1522 
1523 	if (scsi_is_sas_phy_local(local_phy)) {
1524 		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
1525 		unsigned long flags;
1526 
1527 		spin_lock_irqsave(&phy->lock, flags);
1528 		phy->reset_completion = NULL;
1529 		phy->in_reset = 0;
1530 		spin_unlock_irqrestore(&phy->lock, flags);
1531 
1532 		/* report PHY down if timed out */
1533 		if (!ret)
1534 			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
1535 	} else
1536 		msleep(2000);
1537 	sas_put_local_phy(local_phy);
1538 	return rc;
1539 }
1540 
1541 static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1542 {
1543 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1544 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1545 	struct device *dev = hisi_hba->dev;
1546 	int rc = TMF_RESP_FUNC_FAILED;
1547 
1548 	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1549 		return TMF_RESP_FUNC_FAILED;
1550 	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1551 
1552 	rc = hisi_sas_internal_task_abort(hisi_hba, device,
1553 					HISI_SAS_INT_ABT_DEV, 0);
1554 	if (rc < 0) {
1555 		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
1556 		return TMF_RESP_FUNC_FAILED;
1557 	}
1558 	hisi_sas_dereg_device(hisi_hba, device);
1559 
1560 	rc = hisi_sas_debug_I_T_nexus_reset(device);
1561 
1562 	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
1563 		hisi_sas_release_task(hisi_hba, device);
1564 
1565 	return rc;
1566 }
1567 
1568 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1569 {
1570 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1571 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1572 	struct device *dev = hisi_hba->dev;
1573 	int rc = TMF_RESP_FUNC_FAILED;
1574 
1575 	sas_dev->dev_status = HISI_SAS_DEV_EH;
1576 	if (dev_is_sata(device)) {
1577 		struct sas_phy *phy;
1578 
1579 		/* Clear internal IO and then hardreset */
1580 		rc = hisi_sas_internal_task_abort(hisi_hba, device,
1581 						  HISI_SAS_INT_ABT_DEV, 0);
1582 		if (rc < 0) {
1583 			dev_err(dev, "lu_reset: internal abort failed\n");
1584 			goto out;
1585 		}
1586 		hisi_sas_dereg_device(hisi_hba, device);
1587 
1588 		phy = sas_get_local_phy(device);
1589 
1590 		rc = sas_phy_reset(phy, 1);
1591 
1592 		if (rc == 0)
1593 			hisi_sas_release_task(hisi_hba, device);
1594 		sas_put_local_phy(phy);
1595 	} else {
1596 		struct hisi_sas_tmf_task tmf_task = { .tmf =  TMF_LU_RESET };
1597 
1598 		rc = hisi_sas_internal_task_abort(hisi_hba, device,
1599 						HISI_SAS_INT_ABT_DEV, 0);
1600 		if (rc < 0) {
1601 			dev_err(dev, "lu_reset: internal abort failed\n");
1602 			goto out;
1603 		}
1604 		hisi_sas_dereg_device(hisi_hba, device);
1605 
1606 		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1607 		if (rc == TMF_RESP_FUNC_COMPLETE)
1608 			hisi_sas_release_task(hisi_hba, device);
1609 	}
1610 out:
1611 	if (rc != TMF_RESP_FUNC_COMPLETE)
1612 		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
1613 			     sas_dev->device_id, rc);
1614 	return rc;
1615 }
1616 
1617 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1618 {
1619 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1620 	struct device *dev = hisi_hba->dev;
1621 	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
1622 	int rc, i;
1623 
1624 	queue_work(hisi_hba->wq, &r.work);
1625 	wait_for_completion(r.completion);
1626 	if (!r.done)
1627 		return TMF_RESP_FUNC_FAILED;
1628 
1629 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1630 		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1631 		struct domain_device *device = sas_dev->sas_device;
1632 
1633 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
1634 		    DEV_IS_EXPANDER(device->dev_type))
1635 			continue;
1636 
1637 		rc = hisi_sas_debug_I_T_nexus_reset(device);
1638 		if (rc != TMF_RESP_FUNC_COMPLETE)
1639 			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
1640 				 sas_dev->device_id, rc);
1641 	}
1642 
1643 	hisi_sas_release_tasks(hisi_hba);
1644 
1645 	return TMF_RESP_FUNC_COMPLETE;
1646 }
1647 
1648 static int hisi_sas_query_task(struct sas_task *task)
1649 {
1650 	struct scsi_lun lun;
1651 	struct hisi_sas_tmf_task tmf_task;
1652 	int rc = TMF_RESP_FUNC_FAILED;
1653 
1654 	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1655 		struct scsi_cmnd *cmnd = task->uldd_task;
1656 		struct domain_device *device = task->dev;
1657 		struct hisi_sas_slot *slot = task->lldd_task;
1658 		u32 tag = slot->idx;
1659 
1660 		int_to_scsilun(cmnd->device->lun, &lun);
1661 		tmf_task.tmf = TMF_QUERY_TASK;
1662 		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1663 
1664 		rc = hisi_sas_debug_issue_ssp_tmf(device,
1665 						  lun.scsi_lun,
1666 						  &tmf_task);
1667 		switch (rc) {
1668 		/* The task is still in the LUN, so release it */
1669 		case TMF_RESP_FUNC_SUCC:
1670 		/* The task is not in the LUN, or it failed; reset the PHY */
1671 		case TMF_RESP_FUNC_FAILED:
1672 		case TMF_RESP_FUNC_COMPLETE:
1673 			break;
1674 		default:
1675 			rc = TMF_RESP_FUNC_FAILED;
1676 			break;
1677 		}
1678 	}
1679 	return rc;
1680 }
1681 
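/*
 * Build and deliver a zero-payload internal abort command: allocate a
 * slot and delivery-queue entry as on the normal send path, but fill
 * the header via hw->prep_abort() with the abort scope (device or
 * single command) and the tag of the command to be aborted.
 */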
1682 static int
1683 hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1684 				  struct sas_task *task, int abort_flag,
1685 				  int task_tag)
1686 {
1687 	struct domain_device *device = task->dev;
1688 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1689 	struct device *dev = hisi_hba->dev;
1690 	struct hisi_sas_port *port;
1691 	struct hisi_sas_slot *slot;
1692 	struct asd_sas_port *sas_port = device->port;
1693 	struct hisi_sas_cmd_hdr *cmd_hdr_base;
1694 	struct hisi_sas_dq *dq = sas_dev->dq;
1695 	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
1696 	unsigned long flags, flags_dq = 0;
1697 	int wr_q_index;
1698 
1699 	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
1700 		return -EINVAL;
1701 
1702 	if (!device->port)
1703 		return -1;
1704 
1705 	port = to_hisi_sas_port(sas_port);
1706 
1707 	/* simply get a slot and send abort command */
1708 	spin_lock_irqsave(&hisi_hba->lock, flags);
1709 	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
1710 	if (rc) {
1711 		spin_unlock_irqrestore(&hisi_hba->lock, flags);
1712 		goto err_out;
1713 	}
1714 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
1715 
1716 	slot = &hisi_hba->slot_info[slot_idx];
1717 
1718 	spin_lock_irqsave(&dq->lock, flags_dq);
1719 	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
1720 	if (wr_q_index < 0) {
1721 		spin_unlock_irqrestore(&dq->lock, flags_dq);
1722 		rc = -EAGAIN;
1723 		goto err_out_tag;
1724 	}
1725 	list_add_tail(&slot->delivery, &dq->list);
1726 	spin_unlock_irqrestore(&dq->lock, flags_dq);
1727 
1728 	dlvry_queue = dq->id;
1729 	dlvry_queue_slot = wr_q_index;
1730 
1731 	slot->n_elem = n_elem;
1732 	slot->dlvry_queue = dlvry_queue;
1733 	slot->dlvry_queue_slot = dlvry_queue_slot;
1734 	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
1735 	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
1736 	slot->task = task;
1737 	slot->port = port;
1738 	slot->is_internal = true;
1739 	task->lldd_task = slot;
1740 
1741 	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
1742 	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
1743 	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
1744 
1745 	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
1746 				      abort_flag, task_tag);
1747 
1748 	spin_lock_irqsave(&task->task_state_lock, flags);
1749 	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
1750 	spin_unlock_irqrestore(&task->task_state_lock, flags);
1751 
1752 	WRITE_ONCE(slot->ready, 1);
1753 	/* send abort command to the chip */
1754 	spin_lock_irqsave(&dq->lock, flags);
1755 	list_add_tail(&slot->entry, &sas_dev->list);
1756 	hisi_hba->hw->start_delivery(dq);
1757 	spin_unlock_irqrestore(&dq->lock, flags);
1758 
1759 	return 0;
1760 
1761 err_out_tag:
1762 	spin_lock_irqsave(&hisi_hba->lock, flags);
1763 	hisi_sas_slot_index_free(hisi_hba, slot_idx);
1764 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
1765 err_out:
1766 	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
1767 
1768 	return rc;
1769 }
1770 
1771 /**
1772  * hisi_sas_internal_task_abort - execute an internal
1773  * abort command for a single I/O command or a device
1774  * @hisi_hba: host controller struct
1775  * @device: domain device
1776  * @abort_flag: mode of operation, device or single IO
1777  * @tag: tag of IO to be aborted (only relevant to single
1778  *       IO mode)
1779  */
1780 static int
1781 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1782 			     struct domain_device *device,
1783 			     int abort_flag, int tag)
1784 {
1785 	struct sas_task *task;
1786 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1787 	struct device *dev = hisi_hba->dev;
1788 	int res;
1789 
1790 	/*
1791 	 * If this interface is not implemented, the HW either does not
1792 	 * support internal abort or does not need one. In that case return
1793 	 * TMF_RESP_FUNC_FAILED and let the remaining steps proceed, which
1794 	 * assumes the internal abort has been executed and its CQ returned.
1795 	 */
1796 	if (!hisi_hba->hw->prep_abort)
1797 		return TMF_RESP_FUNC_FAILED;
1798 
1799 	task = sas_alloc_slow_task(GFP_KERNEL);
1800 	if (!task)
1801 		return -ENOMEM;
1802 
1803 	task->dev = device;
1804 	task->task_proto = device->tproto;
1805 	task->task_done = hisi_sas_task_done;
1806 	task->slow_task->timer.function = hisi_sas_tmf_timedout;
1807 	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
1808 	add_timer(&task->slow_task->timer);
1809 
1810 	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
1811 						task, abort_flag, tag);
1812 	if (res) {
1813 		del_timer(&task->slow_task->timer);
1814 		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
1815 			res);
1816 		goto exit;
1817 	}
1818 	wait_for_completion(&task->slow_task->completion);
1819 	res = TMF_RESP_FUNC_FAILED;
1820 
1821 	/* Internal abort timed out */
1822 	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1823 		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1824 			struct hisi_sas_slot *slot = task->lldd_task;
1825 
1826 			if (slot)
1827 				slot->task = NULL;
1828 			dev_err(dev, "internal task abort: timeout and not done.\n");
1829 			res = -EIO;
1830 			goto exit;
1831 		} else
1832 			dev_err(dev, "internal task abort: timeout.\n");
1833 	}
1834 
1835 	if (task->task_status.resp == SAS_TASK_COMPLETE &&
1836 		task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1837 		res = TMF_RESP_FUNC_COMPLETE;
1838 		goto exit;
1839 	}
1840 
1841 	if (task->task_status.resp == SAS_TASK_COMPLETE &&
1842 		task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1843 		res = TMF_RESP_FUNC_SUCC;
1844 		goto exit;
1845 	}
1846 
1847 exit:
1848 	dev_dbg(dev,
1849 		"internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n",
1850 		SAS_ADDR(device->sas_addr),
1851 		task,
1852 		task->task_status.resp, /* 0 is complete, -1 is undelivered */
1853 		task->task_status.stat);
1854 	sas_free_task(task);
1855 
1856 	return res;
1857 }
1858 
1859 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
1860 {
1861 	hisi_sas_port_notify_formed(sas_phy);
1862 }
1863 
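/*
 * Deliberate no-op: there is nothing for this driver to do on port
 * deformation beyond what libsas itself handles, but the stub keeps
 * the lldd_port_deformed slot in the transport ops table populated.
 */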
1864 static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
1865 {
1866 }
1867 
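/*
 * SGPIO pass-through: forward the request to the HW-specific hook,
 * or report -EOPNOTSUPP when the variant does not implement one.
 */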
1868 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1869 			u8 reg_index, u8 reg_count, u8 *write_data)
1870 {
1871 	struct hisi_hba *hisi_hba = sha->lldd_ha;
1872 
1873 	if (!hisi_hba->hw->write_gpio)
1874 		return -EOPNOTSUPP;
1875 
1876 	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1877 				reg_index, reg_count, write_data);
1878 }
1879 
1880 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1881 {
1882 	phy->phy_attached = 0;
1883 	phy->phy_type = 0;
1884 	phy->port = NULL;
1885 }
1886 
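/**
 * hisi_sas_phy_down - process a phy down event from the HW layer
 * @hisi_hba: host controller struct
 * @phy_no: index of the phy which went down
 * @rdy: non-zero if the phy is down but still ready, in which case
 *       the port is (re-)notified as formed; zero if the link is
 *       really lost
 */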
1887 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1888 {
1889 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1890 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
1891 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1892 	struct device *dev = hisi_hba->dev;
1893 
1894 	if (rdy) {
1895 		/* Phy down but ready */
1896 		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
1897 		hisi_sas_port_notify_formed(sas_phy);
1898 	} else {
1899 		struct hisi_sas_port *port  = phy->port;
1900 
1901 		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
1902 		    phy->in_reset) {
1903 			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
1904 			return;
1905 		}
1906 		/* Phy down and not ready */
1907 		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1908 		sas_phy_disconnected(sas_phy);
1909 
1910 		if (port) {
1911 			if (phy->phy_type & PORT_TYPE_SAS) {
1912 				int port_id = port->id;
1913 
1914 				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
1915 								       port_id))
1916 					port->port_attached = 0;
1917 			} else if (phy->phy_type & PORT_TYPE_SATA)
1918 				port->port_attached = 0;
1919 		}
1920 		hisi_sas_phy_disconnected(phy);
1921 	}
1922 }
1923 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1924 
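/*
 * Kill the per-CQ completion tasklets; intended for reset paths in
 * the HW-specific modules, where (presumably) interrupt delivery has
 * already been quiesced so the tasklets cannot be re-scheduled.
 */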
1925 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
1926 {
1927 	int i;
1928 
1929 	for (i = 0; i < hisi_hba->queue_count; i++) {
1930 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1931 
1932 		tasklet_kill(&cq->tasklet);
1933 	}
1934 }
1935 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1936 
1937 struct scsi_transport_template *hisi_sas_stt;
1938 EXPORT_SYMBOL_GPL(hisi_sas_stt);
1939 
1940 struct device_attribute *host_attrs[] = {
1941 	&dev_attr_phy_event_threshold,
1942 	NULL,
1943 };
1944 EXPORT_SYMBOL_GPL(host_attrs);
1945 
1946 static struct sas_domain_function_template hisi_sas_transport_ops = {
1947 	.lldd_dev_found		= hisi_sas_dev_found,
1948 	.lldd_dev_gone		= hisi_sas_dev_gone,
1949 	.lldd_execute_task	= hisi_sas_queue_command,
1950 	.lldd_control_phy	= hisi_sas_control_phy,
1951 	.lldd_abort_task	= hisi_sas_abort_task,
1952 	.lldd_abort_task_set	= hisi_sas_abort_task_set,
1953 	.lldd_clear_aca		= hisi_sas_clear_aca,
1954 	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
1955 	.lldd_lu_reset		= hisi_sas_lu_reset,
1956 	.lldd_query_task	= hisi_sas_query_task,
1957 	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
1958 	.lldd_port_formed	= hisi_sas_port_formed,
1959 	.lldd_port_deformed	= hisi_sas_port_deformed,
1960 	.lldd_write_gpio	= hisi_sas_write_gpio,
1961 };
1962 
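/*
 * Re-zero the DMA-coherent queue and table memory and rewind the DQ
 * write / CQ read pointers; used by hisi_sas_alloc() below and
 * exported, presumably so the HW modules can reuse it on reset.
 */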
1963 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1964 {
1965 	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1966 
1967 	for (i = 0; i < hisi_hba->queue_count; i++) {
1968 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1969 		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1970 
1971 		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1972 		memset(hisi_hba->cmd_hdr[i], 0, s);
1973 		dq->wr_point = 0;
1974 
1975 		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1976 		memset(hisi_hba->complete_hdr[i], 0, s);
1977 		cq->rd_point = 0;
1978 	}
1979 
1980 	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1981 	memset(hisi_hba->initial_fis, 0, s);
1982 
1983 	s = max_command_entries * sizeof(struct hisi_sas_iost);
1984 	memset(hisi_hba->iost, 0, s);
1985 
1986 	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1987 	memset(hisi_hba->breakpoint, 0, s);
1988 
1989 	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1990 	memset(hisi_hba->sata_breakpoint, 0, s);
1991 }
1992 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
1993 
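/**
 * hisi_sas_alloc - allocate the driver-core queues, tables and slots
 * @hisi_hba: host controller struct
 * @shost: SCSI host the controller is bound to
 *
 * Every allocation here is device-managed (dmam_/devm_), so the
 * error path can simply return -ENOMEM and rely on devres to free
 * whatever was already allocated.
 */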
1994 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1995 {
1996 	struct device *dev = hisi_hba->dev;
1997 	int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
1998 	int max_command_entries_ru, sz_slot_buf_ru;
1999 	int blk_cnt, slots_per_blk;
2000 
2001 	sema_init(&hisi_hba->sem, 1);
2002 	spin_lock_init(&hisi_hba->lock);
2003 	for (i = 0; i < hisi_hba->n_phy; i++) {
2004 		hisi_sas_phy_init(hisi_hba, i);
2005 		hisi_hba->port[i].port_attached = 0;
2006 		hisi_hba->port[i].id = -1;
2007 	}
2008 
2009 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
2010 		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
2011 		hisi_hba->devices[i].device_id = i;
2012 		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
2013 	}
2014 
2015 	for (i = 0; i < hisi_hba->queue_count; i++) {
2016 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2017 		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2018 
2019 		/* Completion queue structure */
2020 		cq->id = i;
2021 		cq->hisi_hba = hisi_hba;
2022 
2023 		/* Delivery queue structure */
2024 		spin_lock_init(&dq->lock);
2025 		INIT_LIST_HEAD(&dq->list);
2026 		dq->id = i;
2027 		dq->hisi_hba = hisi_hba;
2028 
2029 		/* Delivery queue */
2030 		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
2031 		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
2032 						&hisi_hba->cmd_hdr_dma[i],
2033 						GFP_KERNEL);
2034 		if (!hisi_hba->cmd_hdr[i])
2035 			goto err_out;
2036 
2037 		/* Completion queue */
2038 		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2039 		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
2040 						&hisi_hba->complete_hdr_dma[i],
2041 						GFP_KERNEL);
2042 		if (!hisi_hba->complete_hdr[i])
2043 			goto err_out;
2044 	}
2045 
2046 	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
2047 	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
2048 					     GFP_KERNEL);
2049 	if (!hisi_hba->itct)
2050 		goto err_out;
2051 	memset(hisi_hba->itct, 0, s);
2052 
2053 	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
2054 					   sizeof(struct hisi_sas_slot),
2055 					   GFP_KERNEL);
2056 	if (!hisi_hba->slot_info)
2057 		goto err_out;
2058 
2059 	/* roundup to avoid overly large block size */
2060 	max_command_entries_ru = roundup(max_command_entries, 64);
2061 	sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64);
2062 	s = lcm(max_command_entries_ru, sz_slot_buf_ru);
2063 	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
2064 	slots_per_blk = s / sz_slot_buf_ru;
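	/*
	 * With s = lcm(max_command_entries_ru, sz_slot_buf_ru), each
	 * DMA block of s bytes holds a whole number of slot buffer
	 * tables (slots_per_blk = s / sz_slot_buf_ru), and
	 * blk_cnt * slots_per_blk == max_command_entries_ru, so the
	 * loop below covers every slot without any one allocation
	 * growing beyond s bytes.
	 */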
2065 	for (i = 0; i < blk_cnt; i++) {
2066 		struct hisi_sas_slot_buf_table *buf;
2067 		dma_addr_t buf_dma;
2068 		int slot_index = i * slots_per_blk;
2069 
2070 		buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL);
2071 		if (!buf)
2072 			goto err_out;
2073 		memset(buf, 0, s);
2074 
2075 		for (j = 0; j < slots_per_blk; j++, slot_index++) {
2076 			struct hisi_sas_slot *slot;
2077 
2078 			slot = &hisi_hba->slot_info[slot_index];
2079 			slot->buf = buf;
2080 			slot->buf_dma = buf_dma;
2081 			slot->idx = slot_index;
2082 
2083 			buf++;
2084 			buf_dma += sizeof(*buf);
2085 		}
2086 	}
2087 
2088 	s = max_command_entries * sizeof(struct hisi_sas_iost);
2089 	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
2090 					     GFP_KERNEL);
2091 	if (!hisi_hba->iost)
2092 		goto err_out;
2093 
2094 	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2095 	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
2096 						   &hisi_hba->breakpoint_dma,
2097 						   GFP_KERNEL);
2098 	if (!hisi_hba->breakpoint)
2099 		goto err_out;
2100 
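	/*
	 * One tag bit per command slot; note the division truncates,
	 * so this assumes max_command_entries is a multiple of
	 * BITS_PER_BYTE.
	 */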
2101 	hisi_hba->slot_index_count = max_command_entries;
2102 	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
2103 	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
2104 	if (!hisi_hba->slot_index_tags)
2105 		goto err_out;
2106 
2107 	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
2108 	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
2109 						    &hisi_hba->initial_fis_dma,
2110 						    GFP_KERNEL);
2111 	if (!hisi_hba->initial_fis)
2112 		goto err_out;
2113 
2114 	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
2115 	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
2116 					&hisi_hba->sata_breakpoint_dma,
2117 					GFP_KERNEL);
2118 	if (!hisi_hba->sata_breakpoint)
2119 		goto err_out;
2120 	hisi_sas_init_mem(hisi_hba);
2121 
2122 	hisi_sas_slot_index_init(hisi_hba);
2123 
2124 	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
2125 	if (!hisi_hba->wq) {
2126 		dev_err(dev, "sas_alloc: failed to create workqueue\n");
2127 		goto err_out;
2128 	}
2129 
2130 	return 0;
2131 err_out:
2132 	return -ENOMEM;
2133 }
2134 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
2135 
2136 void hisi_sas_free(struct hisi_hba *hisi_hba)
2137 {
2138 	if (hisi_hba->wq)
2139 		destroy_workqueue(hisi_hba->wq);
2140 }
2141 EXPORT_SYMBOL_GPL(hisi_sas_free);
2142 
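/*
 * Asynchronous controller reset: a fire-and-forget work item (see
 * the INIT_WORK() in hisi_sas_shost_alloc() below); the synchronous
 * variant that follows additionally signals a completion and records
 * success in rst->done.
 */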
2143 void hisi_sas_rst_work_handler(struct work_struct *work)
2144 {
2145 	struct hisi_hba *hisi_hba =
2146 		container_of(work, struct hisi_hba, rst_work);
2147 
2148 	hisi_sas_controller_reset(hisi_hba);
2149 }
2150 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
2151 
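/*
 * A minimal usage sketch for the synchronous variant (hypothetical
 * caller; only fields this handler actually touches are shown):
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	struct hisi_sas_rst rst = {
 *		.hisi_hba = hisi_hba,
 *		.completion = &done,
 *	};
 *
 *	INIT_WORK(&rst.work, hisi_sas_sync_rst_work_handler);
 *	queue_work(hisi_hba->wq, &rst.work);
 *	wait_for_completion(&done);
 *	if (!rst.done)
 *		... the reset failed ...
 */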
2152 void hisi_sas_sync_rst_work_handler(struct work_struct *work)
2153 {
2154 	struct hisi_sas_rst *rst =
2155 		container_of(work, struct hisi_sas_rst, work);
2156 
2157 	if (!hisi_sas_controller_reset(rst->hisi_hba))
2158 		rst->done = true;
2159 	complete(rst->completion);
2160 }
2161 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
2162 
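/*
 * Illustrative sketch of the firmware properties consumed below, in
 * DT form (the node name, compatible string and register values are
 * assumptions for illustration, not taken from a real board file):
 *
 *	sas@c1000000 {
 *		compatible = "hisilicon,hip06-sas-v2";
 *		sas-addr = [50 01 88 20 16 00 00 00];
 *		hisilicon,sas-syscon = <&dsa_subctrl>;
 *		ctrl-reset-reg = <0xa60>;
 *		ctrl-reset-sts-reg = <0x5a30>;
 *		ctrl-clock-ena-reg = <0x338>;
 *		phy-count = <9>;
 *		queue-count = <16>;
 *	};
 *
 * On ACPI systems the same properties are read through the unified
 * device_property_*() interface, minus the DT-only syscon block.
 */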
2163 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
2164 {
2165 	struct device *dev = hisi_hba->dev;
2166 	struct platform_device *pdev = hisi_hba->platform_dev;
2167 	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
2168 	struct clk *refclk;
2169 
2170 	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
2171 					  SAS_ADDR_SIZE)) {
2172 		dev_err(dev, "could not get property sas-addr\n");
2173 		return -ENOENT;
2174 	}
2175 
2176 	if (np) {
2177 		/*
2178 		 * These properties are only required for platform device-based
2179 		 * controller with DT firmware.
2180 		 */
2181 		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
2182 					"hisilicon,sas-syscon");
2183 		if (IS_ERR(hisi_hba->ctrl)) {
2184 			dev_err(dev, "could not get syscon\n");
2185 			return -ENOENT;
2186 		}
2187 
2188 		if (device_property_read_u32(dev, "ctrl-reset-reg",
2189 					     &hisi_hba->ctrl_reset_reg)) {
2190 			dev_err(dev,
2191 				"could not get property ctrl-reset-reg\n");
2192 			return -ENOENT;
2193 		}
2194 
2195 		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
2196 					     &hisi_hba->ctrl_reset_sts_reg)) {
2197 			dev_err(dev,
2198 				"could not get property ctrl-reset-sts-reg\n");
2199 			return -ENOENT;
2200 		}
2201 
2202 		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
2203 					     &hisi_hba->ctrl_clock_ena_reg)) {
2204 			dev_err(dev,
2205 				"could not get property ctrl-clock-ena-reg\n");
2206 			return -ENOENT;
2207 		}
2208 	}
2209 
2210 	refclk = devm_clk_get(dev, NULL);
2211 	if (IS_ERR(refclk))
2212 		dev_dbg(dev, "no ref clk property\n");
2213 	else
2214 		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2215 
2216 	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2217 		dev_err(dev, "could not get property phy-count\n");
2218 		return -ENOENT;
2219 	}
2220 
2221 	if (device_property_read_u32(dev, "queue-count",
2222 				     &hisi_hba->queue_count)) {
2223 		dev_err(dev, "could not get property queue-count\n");
2224 		return -ENOENT;
2225 	}
2226 
2227 	return 0;
2228 }
2229 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2230 
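/*
 * Allocate the Scsi_Host with the embedded struct hisi_hba, read the
 * firmware-supplied properties, set the DMA mask (64-bit preferred,
 * 32-bit fallback) and map the register resources; the SGPIO region
 * is optional.
 */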
2231 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
2232 					      const struct hisi_sas_hw *hw)
2233 {
2234 	struct resource *res;
2235 	struct Scsi_Host *shost;
2236 	struct hisi_hba *hisi_hba;
2237 	struct device *dev = &pdev->dev;
2238 
2239 	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
2240 	if (!shost) {
2241 		dev_err(dev, "scsi host alloc failed\n");
2242 		return NULL;
2243 	}
2244 	hisi_hba = shost_priv(shost);
2245 
2246 	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2247 	hisi_hba->hw = hw;
2248 	hisi_hba->dev = dev;
2249 	hisi_hba->platform_dev = pdev;
2250 	hisi_hba->shost = shost;
2251 	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2252 
2253 	timer_setup(&hisi_hba->timer, NULL, 0);
2254 
2255 	if (hisi_sas_get_fw_info(hisi_hba) < 0)
2256 		goto err_out;
2257 
2258 	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
2259 	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2260 		dev_err(dev, "No usable DMA addressing method\n");
2261 		goto err_out;
2262 	}
2263 
2264 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2265 	hisi_hba->regs = devm_ioremap_resource(dev, res);
2266 	if (IS_ERR(hisi_hba->regs))
2267 		goto err_out;
2268 
2269 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2270 	if (res) {
2271 		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
2272 		if (IS_ERR(hisi_hba->sgpio_regs))
2273 			goto err_out;
2274 	}
2275 
2276 	if (hisi_sas_alloc(hisi_hba, shost)) {
2277 		hisi_sas_free(hisi_hba);
2278 		goto err_out;
2279 	}
2280 
2281 	return shost;
2282 err_out:
2283 	scsi_host_put(shost);
2284 	dev_err(dev, "shost alloc failed\n");
2285 	return NULL;
2286 }
2287 
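/**
 * hisi_sas_probe - common probe path for HW variants probed as
 * platform devices
 * @pdev: platform device
 * @hw: HW-specific operations table of the calling module
 *
 * Allocates the host, wires up the libsas phy/port arrays, registers
 * the SCSI host and SAS HA, runs the HW init hook and kicks off the
 * initial scan.
 */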
2288 int hisi_sas_probe(struct platform_device *pdev,
2289 		   const struct hisi_sas_hw *hw)
2290 {
2291 	struct Scsi_Host *shost;
2292 	struct hisi_hba *hisi_hba;
2293 	struct device *dev = &pdev->dev;
2294 	struct asd_sas_phy **arr_phy;
2295 	struct asd_sas_port **arr_port;
2296 	struct sas_ha_struct *sha;
2297 	int rc, phy_nr, port_nr, i;
2298 
2299 	shost = hisi_sas_shost_alloc(pdev, hw);
2300 	if (!shost)
2301 		return -ENOMEM;
2302 
2303 	sha = SHOST_TO_SAS_HA(shost);
2304 	hisi_hba = shost_priv(shost);
2305 	platform_set_drvdata(pdev, sha);
2306 
2307 	phy_nr = port_nr = hisi_hba->n_phy;
2308 
2309 	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2310 	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2311 	if (!arr_phy || !arr_port) {
2312 		rc = -ENOMEM;
2313 		goto err_out_ha;
2314 	}
2315 
2316 	sha->sas_phy = arr_phy;
2317 	sha->sas_port = arr_port;
2318 	sha->lldd_ha = hisi_hba;
2319 
2320 	shost->transportt = hisi_sas_stt;
2321 	shost->max_id = HISI_SAS_MAX_DEVICES;
2322 	shost->max_lun = ~0;
2323 	shost->max_channel = 1;
2324 	shost->max_cmd_len = 16;
2325 	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2326 	shost->can_queue = hisi_hba->hw->max_command_entries;
2327 	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2328 
2329 	sha->sas_ha_name = DRV_NAME;
2330 	sha->dev = hisi_hba->dev;
2331 	sha->lldd_module = THIS_MODULE;
2332 	sha->sas_addr = &hisi_hba->sas_addr[0];
2333 	sha->num_phys = hisi_hba->n_phy;
2334 	sha->core.shost = hisi_hba->shost;
2335 
2336 	for (i = 0; i < hisi_hba->n_phy; i++) {
2337 		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2338 		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2339 	}
2340 
2341 	rc = scsi_add_host(shost, &pdev->dev);
2342 	if (rc)
2343 		goto err_out_ha;
2344 
2345 	rc = sas_register_ha(sha);
2346 	if (rc)
2347 		goto err_out_register_ha;
2348 
2349 	rc = hisi_hba->hw->hw_init(hisi_hba);
2350 	if (rc)
2351 		goto err_out_register_ha;
2352 
2353 	scsi_scan_host(shost);
2354 
2355 	return 0;
2356 
2357 err_out_register_ha:
2358 	scsi_remove_host(shost);
2359 err_out_ha:
2360 	hisi_sas_free(hisi_hba);
2361 	scsi_host_put(shost);
2362 	return rc;
2363 }
2364 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2365 
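/*
 * Tear down in roughly the reverse order of hisi_sas_probe():
 * unregister the HA, remove the host, free the driver resources and
 * drop the final host reference.
 */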
2366 int hisi_sas_remove(struct platform_device *pdev)
2367 {
2368 	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2369 	struct hisi_hba *hisi_hba = sha->lldd_ha;
2370 	struct Scsi_Host *shost = sha->core.shost;
2371 
2372 	if (timer_pending(&hisi_hba->timer))
2373 		del_timer(&hisi_hba->timer);
2374 
2375 	sas_unregister_ha(sha);
2376 	sas_remove_host(sha->core.shost);
2377 
2378 	hisi_sas_free(hisi_hba);
2379 	scsi_host_put(shost);
2380 	return 0;
2381 }
2382 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2383 
2384 static __init int hisi_sas_init(void)
2385 {
2386 	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2387 	if (!hisi_sas_stt)
2388 		return -ENOMEM;
2389 
2390 	return 0;
2391 }
2392 
2393 static __exit void hisi_sas_exit(void)
2394 {
2395 	sas_release_transport(hisi_sas_stt);
2396 }
2397 
2398 module_init(hisi_sas_init);
2399 module_exit(hisi_sas_exit);
2400 
2401 MODULE_LICENSE("GPL");
2402 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2403 MODULE_DESCRIPTION("HISILICON SAS controller driver");
2404 MODULE_ALIAS("platform:" DRV_NAME);
2405