xref: /freebsd/sys/dev/mpi3mr/mpi3mr_cam.c (revision e453e498cbb88570a3ff7b3679de65c88707da95)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2020-2025, Broadcom Inc. All rights reserved.
5  * Support: <fbsd-storage-driver.pdl@broadcom.com>
6  *
7  * Authors: Sumit Saxena <sumit.saxena@broadcom.com>
8  *	    Chandrakanth Patil <chandrakanth.patil@broadcom.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  *
14  * 1. Redistributions of source code must retain the above copyright notice,
15  *    this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright notice,
17  *    this list of conditions and the following disclaimer in the documentation and/or other
18  *    materials provided with the distribution.
19  * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software without
21  *    specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * The views and conclusions contained in the software and documentation are
36  * those of the authors and should not be interpreted as representing
37  * official policies,either expressed or implied, of the FreeBSD Project.
38  *
39  * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
40  *
41  * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
42  */
43 
44 #include <sys/types.h>
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/selinfo.h>
49 #include <sys/module.h>
50 #include <sys/bus.h>
51 #include <sys/conf.h>
52 #include <sys/bio.h>
53 #include <sys/malloc.h>
54 #include <sys/uio.h>
55 #include <sys/sysctl.h>
56 #include <sys/endian.h>
57 #include <sys/queue.h>
58 #include <sys/kthread.h>
59 #include <sys/taskqueue.h>
60 #include <sys/sbuf.h>
61 #include <sys/stdarg.h>
62 
63 #include <machine/bus.h>
64 #include <machine/resource.h>
65 #include <sys/rman.h>
66 
67 #include <cam/cam.h>
68 #include <cam/cam_ccb.h>
69 #include <cam/cam_debug.h>
70 #include <cam/cam_sim.h>
71 #include <cam/cam_xpt_sim.h>
72 #include <cam/cam_xpt_periph.h>
73 #include <cam/cam_periph.h>
74 #include <cam/scsi/scsi_all.h>
75 #include <cam/scsi/scsi_message.h>
76 #include <cam/scsi/smp_all.h>
77 
78 #include <dev/nvme/nvme.h>
79 #include "mpi/mpi30_api.h"
80 #include "mpi3mr_cam.h"
81 #include "mpi3mr.h"
82 #include <sys/time.h>			/* XXX for pcpu.h */
83 #include <sys/pcpu.h>			/* XXX for PCPU_GET */
84 #include <asm/unaligned.h>
85 
86 #define	smp_processor_id()  PCPU_GET(cpuid)
87 
88 static void
89 mpi3mr_enqueue_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm);
90 static void
91 mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm);
92 void
93 mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc);
94 static void
95 mpi3mr_freeup_events(struct mpi3mr_softc *sc);
96 
97 extern int
98 mpi3mr_register_events(struct mpi3mr_softc *sc);
99 extern void mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
100     bus_addr_t dma_addr);
101 
102 static U32 event_count;
103 
104 static
mpi3mr_divert_ws(Mpi3SCSIIORequest_t * req,struct ccb_scsiio * csio,U16 ws_len)105 inline void mpi3mr_divert_ws(Mpi3SCSIIORequest_t *req,
106 			     struct ccb_scsiio *csio,
107 			     U16 ws_len)
108 {
109 	U8 unmap = 0, ndob = 0;
110 	U32 num_blocks = 0;
111 	U8 opcode = scsiio_cdb_ptr(csio)[0];
112 	U16 service_action = ((scsiio_cdb_ptr(csio)[8] << 8) | scsiio_cdb_ptr(csio)[9]);
113 
114 
115 	if (opcode == WRITE_SAME_16 ||
116 	   (opcode == VARIABLE_LEN_CDB &&
117 	    service_action == WRITE_SAME_32)) {
118 
119 		int unmap_ndob_index = (opcode == WRITE_SAME_16) ? 1 : 10;
120 
121 		unmap = scsiio_cdb_ptr(csio)[unmap_ndob_index] & 0x08;
122 		ndob = scsiio_cdb_ptr(csio)[unmap_ndob_index] & 0x01;
123 		num_blocks = get_unaligned_be32(scsiio_cdb_ptr(csio) +
124 						((opcode == WRITE_SAME_16) ? 10 : 28));
125 
126 		/* Check conditions for diversion to firmware */
127 		if (unmap && ndob && num_blocks > ws_len) {
128 			req->MsgFlags |= MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
129 			req->Flags = htole32(le32toh(req->Flags) |
130 					     MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE);
131 		}
132 	}
133 }
134 
/**
 * mpi3mr_prepare_sgls - busdma load callback: build the SGL, post the I/O
 * @arg: the struct mpi3mr_cmd whose data buffer was just mapped
 * @segs: DMA segment array produced by bus_dmamap_load_ccb()
 * @nsegs: number of valid entries in @segs
 * @error: busdma load status; non-zero means the mapping failed
 *
 * Builds the scatter/gather list of the SCSI IO request frame: simple
 * SGEs are placed inline in the request first, and when they do not
 * all fit, the last inline slot becomes a LAST_CHAIN element pointing
 * to the command's pre-allocated chain buffer which holds the rest.
 * On success the request is enqueued to the controller; on mapping
 * failure the CCB is completed with an error and the command released.
 */
static void mpi3mr_prepare_sgls(void *arg,
	bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mpi3mr_softc *sc;
	struct mpi3mr_cmd *cm;
	u_int i;
	bus_addr_t chain_dma;
	void *chain;
	U8 *sg_local;
	U32 chain_length;
	int sges_left;
	U32 sges_in_segment;
	U8 simple_sgl_flags;
	U8 simple_sgl_flags_last;
	U8 last_chain_sgl_flags;
	struct mpi3mr_chain *chain_req;
	Mpi3SCSIIORequest_t *scsiio_req;
	union ccb *ccb;

	cm = (struct mpi3mr_cmd *)arg;
	sc = cm->sc;
	scsiio_req = (Mpi3SCSIIORequest_t *) &cm->io_request;
	ccb = cm->ccb;

	/* Mapping failed: fail the CCB with a status matching the error. */
	if (error) {
		device_printf(sc->mpi3mr_dev, "%s: error=%d\n",__func__, error);
		if (error == EFBIG) {
			/* Buffer needed more segments than the DMA tag allows. */
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_TOO_BIG);
		} else {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		}
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	}

	/* Sync the freshly mapped buffer before the device touches it. */
	if (cm->data_dir == MPI3MR_READ)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cm->data_dir == MPI3MR_WRITE)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREWRITE);

	KASSERT(nsegs <= sc->max_sgl_entries && nsegs > 0,
	    ("%s: bad SGE count: %d\n", device_get_nameunit(sc->mpi3mr_dev), nsegs));
	KASSERT(scsiio_req->DataLength != 0,
	    ("%s: Data segments (%d), but DataLength == 0\n",
		device_get_nameunit(sc->mpi3mr_dev), nsegs));

	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	sg_local = (U8 *)&scsiio_req->SGL;

	sges_left = nsegs;

	/* How many simple SGEs fit inline in the request frame itself. */
	sges_in_segment = (sc->facts.op_req_sz -
	    offsetof(Mpi3SCSIIORequest_t, SGL))/sizeof(Mpi3SGESimple_t);

	i = 0;

	mpi3mr_dprint(sc, MPI3MR_TRACE, "SGE count: %d IO size: %d\n",
		nsegs, scsiio_req->DataLength);

	/* Everything fits inline: no chain element needed. */
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		sges_in_segment--;
		i++;
	}

	/* Per-command pre-allocated chain buffer, indexed by host tag. */
	chain_req = &sc->chain_sgl_list[cm->hosttag];

	chain = chain_req->buf;
	chain_dma = chain_req->buf_phys;
	memset(chain_req->buf, 0, sc->max_sgl_entries * sizeof(Mpi3SGESimple_t));
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(Mpi3SGESimple_t);

	/* The last inline slot becomes a chain element to the buffer above. */
	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	/* Continue filling SGEs in the chain buffer. */
	sg_local = chain;

fill_in_last_segment:
	/* Emit the remaining SGEs; the final one carries END_OF_LIST. */
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, segs[i].ds_len,
			    segs[i].ds_addr);
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		i++;
	}

	/*
	 * Now that we've created the sgls, we send the request to the device.
	 * Unlike in Linux, dmaload isn't guaranteed to load every time, but
	 * this function is always called when the resources are available, so
	 * we can send the request to hardware here always. mpi3mr_map_request
	 * knows about this quirk and will only take evasive action when an
	 * error other than EINPROGRESS is returned from dmaload.
	 */
	mpi3mr_enqueue_request(sc, cm);

	return;
}
255 
256 static void
mpi3mr_map_request(struct mpi3mr_softc * sc,struct mpi3mr_cmd * cm)257 mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm)
258 {
259 	u_int32_t retcode = 0;
260 	union ccb *ccb;
261 
262 	ccb = cm->ccb;
263 	if (cm->data != NULL) {
264 		mtx_lock(&sc->io_lock);
265 		/* Map data buffer into bus space */
266 		retcode = bus_dmamap_load_ccb(sc->buffer_dmat, cm->dmamap,
267 		    ccb, mpi3mr_prepare_sgls, cm, 0);
268 		mtx_unlock(&sc->io_lock);
269 		if (retcode != 0 && retcode != EINPROGRESS) {
270 			device_printf(sc->mpi3mr_dev,
271 			    "bus_dmamap_load(): retcode = %d\n", retcode);
272 			/*
273 			 * Any other error means prepare_sgls wasn't called, and
274 			 * will never be called, so we have to mop up. This error
275 			 * should never happen, though.
276 			 */
277 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
278 			mpi3mr_release_command(cm);
279 			xpt_done(ccb);
280 		}
281 	} else {
282 		/*
283 		 * No data, we enqueue it directly here.
284 		 */
285 		mpi3mr_enqueue_request(sc, cm);
286 	}
287 }
288 
289 void
mpi3mr_unmap_request(struct mpi3mr_softc * sc,struct mpi3mr_cmd * cmd)290 mpi3mr_unmap_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
291 {
292 	if (cmd->data != NULL) {
293 		if (cmd->data_dir == MPI3MR_READ)
294 			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap, BUS_DMASYNC_POSTREAD);
295 		if (cmd->data_dir == MPI3MR_WRITE)
296 			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap, BUS_DMASYNC_POSTWRITE);
297 		mtx_lock(&sc->io_lock);
298 		bus_dmamap_unload(sc->buffer_dmat, cmd->dmamap);
299 		mtx_unlock(&sc->io_lock);
300 	}
301 }
302 
303 /**
304  * mpi3mr_allow_unmap_to_fw - Whether an unmap is allowed to fw
305  * @sc: Adapter instance reference
306  * @ccb: SCSI Command reference
307  *
308  * The controller hardware cannot handle certain unmap commands
309  * for NVMe drives, this routine checks those and return true
310  * and completes the SCSI command with proper status and sense
311  * data.
312  *
313  * Return: TRUE for allowed unmap, FALSE otherwise.
314  */
mpi3mr_allow_unmap_to_fw(struct mpi3mr_softc * sc,union ccb * ccb)315 static bool mpi3mr_allow_unmap_to_fw(struct mpi3mr_softc *sc,
316 	union ccb *ccb)
317 {
318 	struct ccb_scsiio *csio;
319 	uint16_t param_list_len, block_desc_len, trunc_param_len = 0;
320 
321 	csio = &ccb->csio;
322 	param_list_len = (uint16_t) ((scsiio_cdb_ptr(csio)[7] << 8) | scsiio_cdb_ptr(csio)[8]);
323 
324 	switch(pci_get_revid(sc->mpi3mr_dev)) {
325 	case SAS4116_CHIP_REV_A0:
326 		if (!param_list_len) {
327 			mpi3mr_dprint(sc, MPI3MR_ERROR,
328 			    "%s: CDB received with zero parameter length\n",
329 			    __func__);
330 			mpi3mr_print_cdb(ccb);
331 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
332 			xpt_done(ccb);
333 			return false;
334 		}
335 
336 		if (param_list_len < 24) {
337 			mpi3mr_dprint(sc, MPI3MR_ERROR,
338 			    "%s: CDB received with invalid param_list_len: %d\n",
339 			    __func__, param_list_len);
340 			mpi3mr_print_cdb(ccb);
341 			scsi_set_sense_data(&ccb->csio.sense_data,
342 				/*sense_format*/ SSD_TYPE_FIXED,
343 				/*current_error*/ 1,
344 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
345 				/*asc*/ 0x1A,
346 				/*ascq*/ 0x00,
347 				/*extra args*/ SSD_ELEM_NONE);
348 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
349 			ccb->ccb_h.status =
350 			    CAM_SCSI_STATUS_ERROR |
351 			    CAM_AUTOSNS_VALID;
352 			return false;
353 		}
354 
355 		if (param_list_len != csio->dxfer_len) {
356 			mpi3mr_dprint(sc, MPI3MR_ERROR,
357 			    "%s: CDB received with param_list_len: %d bufflen: %d\n",
358 			    __func__, param_list_len, csio->dxfer_len);
359 			mpi3mr_print_cdb(ccb);
360 			scsi_set_sense_data(&ccb->csio.sense_data,
361 				/*sense_format*/ SSD_TYPE_FIXED,
362 				/*current_error*/ 1,
363 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
364 				/*asc*/ 0x1A,
365 				/*ascq*/ 0x00,
366 				/*extra args*/ SSD_ELEM_NONE);
367 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
368 			ccb->ccb_h.status =
369 			    CAM_SCSI_STATUS_ERROR |
370 			    CAM_AUTOSNS_VALID;
371 			xpt_done(ccb);
372 			return false;
373 		}
374 
375 		block_desc_len = (uint16_t) (csio->data_ptr[2] << 8 | csio->data_ptr[3]);
376 
377 		if (block_desc_len < 16) {
378 			mpi3mr_dprint(sc, MPI3MR_ERROR,
379 			    "%s: Invalid descriptor length in param list: %d\n",
380 			    __func__, block_desc_len);
381 			mpi3mr_print_cdb(ccb);
382 			scsi_set_sense_data(&ccb->csio.sense_data,
383 				/*sense_format*/ SSD_TYPE_FIXED,
384 				/*current_error*/ 1,
385 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
386 				/*asc*/ 0x26,
387 				/*ascq*/ 0x00,
388 				/*extra args*/ SSD_ELEM_NONE);
389 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
390 			ccb->ccb_h.status =
391 			    CAM_SCSI_STATUS_ERROR |
392 			    CAM_AUTOSNS_VALID;
393 			xpt_done(ccb);
394 			return false;
395 		}
396 
397 		if (param_list_len > (block_desc_len + 8)) {
398 			mpi3mr_print_cdb(ccb);
399 			mpi3mr_dprint(sc, MPI3MR_INFO,
400 			    "%s: Truncating param_list_len(%d) to block_desc_len+8(%d)\n",
401 			    __func__, param_list_len, (block_desc_len + 8));
402 			param_list_len = block_desc_len + 8;
403 			scsiio_cdb_ptr(csio)[7] = (param_list_len >> 8) | 0xff;
404 			scsiio_cdb_ptr(csio)[8] = param_list_len | 0xff;
405 			mpi3mr_print_cdb(ccb);
406 		}
407 		break;
408 
409 	case SAS4116_CHIP_REV_B0:
410 		if ((param_list_len > 24) && ((param_list_len - 8) & 0xF)) {
411 			trunc_param_len -= (param_list_len - 8) & 0xF;
412 			mpi3mr_print_cdb(ccb);
413 			mpi3mr_dprint(sc, MPI3MR_INFO,
414 			    "%s: Truncating param_list_len from (%d) to (%d)\n",
415 			    __func__, param_list_len, trunc_param_len);
416 			scsiio_cdb_ptr(csio)[7] = (param_list_len >> 8) | 0xff;
417 			scsiio_cdb_ptr(csio)[8] = param_list_len | 0xff;
418 			mpi3mr_print_cdb(ccb);
419 		}
420 		break;
421 	}
422 
423 	return true;
424 }
425 
426 /**
427  * mpi3mr_tm_response_name -  get TM response as a string
428  * @resp_code: TM response code
429  *
430  * Convert known task management response code as a readable
431  * string.
432  *
433  * Return: response code string.
434  */
mpi3mr_tm_response_name(U8 resp_code)435 static const char* mpi3mr_tm_response_name(U8 resp_code)
436 {
437 	char *desc;
438 
439 	switch (resp_code) {
440 	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
441 		desc = "task management request completed";
442 		break;
443 	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
444 		desc = "invalid frame";
445 		break;
446 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
447 		desc = "task management request not supported";
448 		break;
449 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
450 		desc = "task management request failed";
451 		break;
452 	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
453 		desc = "task management request succeeded";
454 		break;
455 	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
456 		desc = "invalid LUN";
457 		break;
458 	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
459 		desc = "overlapped tag attempted";
460 		break;
461 	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
462 		desc = "task queued, however not sent to target";
463 		break;
464 	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
465 		desc = "task management request denied by NVMe device";
466 		break;
467 	default:
468 		desc = "unknown";
469 		break;
470 	}
471 
472 	return desc;
473 }
474 
mpi3mr_poll_pend_io_completions(struct mpi3mr_softc * sc)475 void mpi3mr_poll_pend_io_completions(struct mpi3mr_softc *sc)
476 {
477 	int i;
478 	int num_of_reply_queues = sc->num_queues;
479 	struct mpi3mr_irq_context *irq_ctx;
480 
481 	for (i = 0; i < num_of_reply_queues; i++) {
482 		irq_ctx = &sc->irq_ctx[i];
483 		mpi3mr_complete_io_cmd(sc, irq_ctx);
484 	}
485 }
486 
/**
 * trigger_reset_from_watchdog - request a controller reset
 * @sc: Adapter instance reference
 * @reset_type: reset type to perform (e.g. MPI3MR_TRIGGER_SOFT_RESET)
 * @reset_reason: reason code recorded for the reset
 *
 * Records the requested reset type and reason in the softc so the
 * reset can be performed asynchronously (presumably by the watchdog
 * thread, per the function name — confirm against the watchdog code).
 * Does nothing if a reset is already in progress.
 */
void
trigger_reset_from_watchdog(struct mpi3mr_softc *sc, U8 reset_type, U16 reset_reason)
{
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO, "Another reset is in progress, no need to trigger the reset\n");
		return;
	}
	/* Only the request is recorded here; no reset is issued directly. */
	sc->reset.type = reset_type;
	sc->reset.reason = reset_reason;

	return;
}
499 
/**
 * mpi3mr_issue_tm - Issue Task Management request
 * @sc: Adapter instance reference
 * @cmd: Timed out command reference
 * @tm_type: Task Management type (MPI3_SCSITASKMGMT_TASKTYPE_*)
 * @timeout: TM timeout value in seconds; for PCIe (NVMe) devices it is
 *           overridden by the device's own abort/reset timeout when set
 *
 * Issues a Task Management Request to the controller for the target
 * and lun of the timed out command, waits for its completion and
 * checks the TM response. Recovers a timed out TM by triggering a
 * controller soft reset via the watchdog context.
 *
 * Return: 0 on success — including the early "TM not required" exits
 *         (unrecoverable controller, reset in progress, missing CCB or
 *         target) — ETIMEDOUT if the TM itself timed out, -1 on other
 *         failures.
 */
static int
mpi3mr_issue_tm(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd,
		U8 tm_type, unsigned long timeout)
{
	int retval = 0;
	MPI3_SCSI_TASK_MGMT_REQUEST tm_req;
	MPI3_SCSI_TASK_MGMT_REPLY *tm_reply = NULL;
	struct mpi3mr_drvr_cmd *drv_cmd = NULL;
	struct mpi3mr_target *tgtdev = NULL;
	struct mpi3mr_op_req_queue *op_req_q = NULL;
	union ccb *ccb;
	U8 resp_code;


	/* A TM cannot help (or be delivered) in these controller states. */
	if (sc->unrecoverable) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"Controller is in unrecoverable state!! TM not required\n");
		return retval;
	}
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"controller reset in progress!! TM not required\n");
		return retval;
	}

	if (!cmd->ccb) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
		return retval;
	}
	ccb = cmd->ccb;

	tgtdev = cmd->targ;
	if (tgtdev == NULL)  {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Device does not exist target ID:0x%x,"
			      "TM is not required\n", ccb->ccb_h.target_id);
		return retval;
	}
	if (tgtdev->dev_removed == 1)  {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Device(0x%x) is removed, TM is not required\n",
			      ccb->ccb_h.target_id);
		return retval;
	}

	/* Single shared TM tracker; its lock serializes TM issuance. */
	drv_cmd = &sc->host_tm_cmds;
	mtx_lock(&drv_cmd->lock);

	memset(&tm_req, 0, sizeof(tm_req));
	tm_req.DevHandle = htole16(tgtdev->dev_handle);
	tm_req.TaskType = tm_type;
	tm_req.HostTag = htole16(MPI3MR_HOSTTAG_TMS);
	int_to_lun(ccb->ccb_h.target_lun, tm_req.LUN);
	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 1;
	drv_cmd->callback = NULL;

	/* NOTE(review): ccb was already checked non-NULL above. */
	if (ccb) {
		if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
			/* ABORT TASK must identify the exact I/O being aborted. */
			op_req_q = &sc->op_req_q[cmd->req_qidx];
			tm_req.TaskHostTag = htole16(cmd->hosttag);
			tm_req.TaskRequestQueueID = htole16(op_req_q->qid);
		}
	}

	/* Hold off new I/O to the target while the TM is outstanding. */
	if (tgtdev)
		mpi3mr_atomic_inc(&tgtdev->block_io);

	/* NVMe devices advertise their own abort/reset timeouts; prefer those. */
	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
		if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		     && tgtdev->dev_spec.pcie_inf.abort_to)
 			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
		else if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET)
			 && tgtdev->dev_spec.pcie_inf.reset_to)
			 timeout = tgtdev->dev_spec.pcie_inf.reset_to;
	}

	/*
	 * NOTE(review): this stores the address of the stack-local pointer
	 * variable 'drv_cmd', which goes out of scope when this function
	 * returns; if tm_chan is used as a sleep/wakeup channel elsewhere,
	 * (void *)drv_cmd may have been intended — confirm.
	 */
	sc->tm_chan = (void *)&drv_cmd;

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
		      "posting task management request: type(%d), handle(0x%04x)\n",
		       tm_type, tgtdev->dev_handle);

	init_completion(&drv_cmd->completion);
	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "posting task management request is failed\n");
		retval = -1;
		goto out_unlock;
	}
	wait_for_completion_timeout_tm(&drv_cmd->completion, timeout, sc);

	/* TM timed out (and was not cut short by a reset): escalate. */
	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
		drv_cmd->is_waiting = 0;
		retval = -1;
		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "task management request timed out after %ld seconds\n", timeout);
			if (sc->mpi3mr_debug & MPI3MR_DEBUG_TM) {
				mpi3mr_dprint(sc, MPI3MR_INFO, "tm_request dump\n");
				mpi3mr_hexdump(&tm_req, sizeof(tm_req), 8);
			}
			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_TM_TIMEOUT);
			retval = ETIMEDOUT;
		}
		goto out_unlock;
	}

	if (!(drv_cmd->state & MPI3MR_CMD_REPLYVALID)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "invalid task management reply message\n");
		retval = -1;
		goto out_unlock;
	}
	tm_reply = (MPI3_SCSI_TASK_MGMT_REPLY *)drv_cmd->reply;

	/* Map IOC status to a TM response code. */
	switch (drv_cmd->ioc_status) {
	case MPI3_IOCSTATUS_SUCCESS:
		resp_code = tm_reply->ResponseData & MPI3MR_RI_MASK_RESPCODE;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* IOC terminated the I/O itself; treat as TM complete. */
		resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
			       tgtdev->dev_handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	switch (resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		/* Acceptable only for QUERY TASK; failure for all other types. */
		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
			retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
		      "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x)"
		      "termination_count(%u), response:%s(0x%x)\n", tm_type, tgtdev->dev_handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
		      tm_reply->TerminationCount, mpi3mr_tm_response_name(resp_code), resp_code);

	if (retval)
		goto out_unlock;

	/*
	 * Drain any completions already posted by the controller so that
	 * I/Os terminated by the TM are returned before the checks below.
	 */
	mpi3mr_disable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);
	mpi3mr_enable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);

	/* Verify firmware's success claim against the driver's own state. */
	switch (tm_type) {
	case MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		if (cmd->state == MPI3MR_CMD_STATE_IN_TM) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "%s: task abort returned success from firmware but corresponding CCB (%p) was not terminated"
				      "marking task abort failed!\n", sc->name, cmd->ccb);
			retval = -1;
		}
		break;
	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		if (mpi3mr_atomic_read(&tgtdev->outstanding)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "%s: target reset returned success from firmware but IOs are still pending on the target (%p)"
				      "marking target reset failed!\n",
				      sc->name, tgtdev);
			retval = -1;
		}
		break;
	default:
		break;
	}

out_unlock:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&drv_cmd->lock);
	/* Re-allow I/O to the target. */
	if (tgtdev && mpi3mr_atomic_read(&tgtdev->block_io) > 0)
		mpi3mr_atomic_dec(&tgtdev->block_io);

	return retval;
}
706 
707 /**
708  * mpi3mr_task_abort- Abort error handling callback
709  * @cmd: Timed out command reference
710  *
711  * Issue Abort Task Management if the command is in LLD scope
712  * and verify if it is aborted successfully and return status
713  * accordingly.
714  *
715  * Return: SUCCESS of successful abort the SCSI command else FAILED
716  */
mpi3mr_task_abort(struct mpi3mr_cmd * cmd)717 static int mpi3mr_task_abort(struct mpi3mr_cmd *cmd)
718 {
719 	int retval = 0;
720 	struct mpi3mr_softc *sc;
721 	union ccb *ccb;
722 
723 	sc = cmd->sc;
724 
725 	if (!cmd->ccb) {
726 		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
727 		return retval;
728 	}
729 	ccb = cmd->ccb;
730 
731 	mpi3mr_dprint(sc, MPI3MR_INFO,
732 		      "attempting abort task for ccb(%p)\n", ccb);
733 
734 	mpi3mr_print_cdb(ccb);
735 
736 	if (cmd->state != MPI3MR_CMD_STATE_BUSY) {
737 		mpi3mr_dprint(sc, MPI3MR_INFO,
738 			      "%s: ccb is not in driver scope, abort task is not required\n",
739 			      sc->name);
740 		return retval;
741 	}
742 	cmd->state = MPI3MR_CMD_STATE_IN_TM;
743 
744 	retval = mpi3mr_issue_tm(sc, cmd, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK, MPI3MR_ABORTTM_TIMEOUT);
745 
746 	mpi3mr_dprint(sc, MPI3MR_INFO,
747 		      "abort task is %s for ccb(%p)\n", ((retval == 0) ? "SUCCESS" : "FAILED"), ccb);
748 
749 	return retval;
750 }
751 
752 /**
753  * mpi3mr_target_reset - Target reset error handling callback
754  * @cmd: Timed out command reference
755  *
756  * Issue Target reset Task Management and verify the SCSI commands are
757  * terminated successfully and return status accordingly.
758  *
759  * Return: SUCCESS of successful termination of the SCSI commands else
760  *         FAILED
761  */
mpi3mr_target_reset(struct mpi3mr_cmd * cmd)762 static int mpi3mr_target_reset(struct mpi3mr_cmd *cmd)
763 {
764 	int retval = 0;
765 	struct mpi3mr_softc *sc;
766 	struct mpi3mr_target *target;
767 
768 	sc = cmd->sc;
769 
770 	target = cmd->targ;
771 	if (target == NULL)  {
772 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device does not exist for target:0x%p,"
773 			      "target reset is not required\n", target);
774 		return retval;
775 	}
776 
777 	mpi3mr_dprint(sc, MPI3MR_INFO,
778 		      "attempting target reset on target(%d)\n", target->per_id);
779 
780 
781 	if (mpi3mr_atomic_read(&target->outstanding)) {
782 		mpi3mr_dprint(sc, MPI3MR_INFO,
783 			      "no outstanding IOs on the target(%d),"
784 			      " target reset not required.\n", target->per_id);
785 		return retval;
786 	}
787 
788 	retval = mpi3mr_issue_tm(sc, cmd, MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, MPI3MR_RESETTM_TIMEOUT);
789 
790 	mpi3mr_dprint(sc, MPI3MR_INFO,
791 		      "target reset is %s for target(%d)\n", ((retval == 0) ? "SUCCESS" : "FAILED"),
792 		      target->per_id);
793 
794 	return retval;
795 }
796 
797 /**
798  * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
799  * @sc: Adapter instance reference
800  *
801  * Calculate the pending I/Os for the controller and return.
802  *
803  * Return: Number of pending I/Os
804  */
mpi3mr_get_fw_pending_ios(struct mpi3mr_softc * sc)805 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_softc *sc)
806 {
807 	U16 i, pend_ios = 0;
808 
809 	for (i = 0; i < sc->num_queues; i++)
810 		pend_ios += mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
811 	return pend_ios;
812 }
813 
814 /**
815  * mpi3mr_wait_for_host_io - block for I/Os to complete
816  * @sc: Adapter instance reference
817  * @timeout: time out in seconds
818  *
819  * Waits for pending I/Os for the given adapter to complete or
820  * to hit the timeout.
821  *
822  * Return: Nothing
823  */
mpi3mr_wait_for_host_io(struct mpi3mr_softc * sc,U32 timeout)824 static int mpi3mr_wait_for_host_io(struct mpi3mr_softc *sc, U32 timeout)
825 {
826 	enum mpi3mr_iocstate iocstate;
827 
828 	iocstate = mpi3mr_get_iocstate(sc);
829 	if (iocstate != MRIOC_STATE_READY) {
830 		mpi3mr_dprint(sc, MPI3MR_XINFO, "%s :Controller is in NON-READY state! Proceed with Reset\n", __func__);
831 		return -1;
832 	}
833 
834 	if (!mpi3mr_get_fw_pending_ios(sc))
835 		return 0;
836 
837 	mpi3mr_dprint(sc, MPI3MR_INFO,
838 		      "%s :Waiting for %d seconds prior to reset for %d pending I/Os to complete\n",
839 		      __func__, timeout, mpi3mr_get_fw_pending_ios(sc));
840 
841 	int i;
842 	for (i = 0; i < timeout; i++) {
843 		if (!mpi3mr_get_fw_pending_ios(sc)) {
844 			mpi3mr_dprint(sc, MPI3MR_INFO, "%s :All pending I/Os got completed while waiting! Reset not required\n", __func__);
845 			return 0;
846 
847 		}
848 		iocstate = mpi3mr_get_iocstate(sc);
849 		if (iocstate != MRIOC_STATE_READY) {
850 			mpi3mr_dprint(sc, MPI3MR_XINFO, "%s :Controller state becomes NON-READY while waiting! dont wait further"
851 				      "Proceed with Reset\n", __func__);
852 			return -1;
853 		}
854 		DELAY(1000 * 1000);
855 	}
856 
857 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Pending I/Os after wait exaust is %d! Proceed with Reset\n", __func__,
858 		      mpi3mr_get_fw_pending_ios(sc));
859 
860 	return -1;
861 }
862 
863 static void
mpi3mr_scsiio_timeout(void * data)864 mpi3mr_scsiio_timeout(void *data)
865 {
866 	int retval = 0;
867 	struct mpi3mr_softc *sc;
868 	struct mpi3mr_cmd *cmd;
869 	struct mpi3mr_target *targ_dev = NULL;
870 
871 	if (!data)
872 		return;
873 
874 	cmd = (struct mpi3mr_cmd *)data;
875 	sc = cmd->sc;
876 
877 	if (cmd->ccb == NULL) {
878 		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
879 		return;
880 	}
881 
882 	/*
883 	 * TMs are not supported for IO timeouts on VD/LD, so directly issue controller reset
884 	 * with max timeout for outstanding IOs to complete is 180sec.
885 	 */
886 	targ_dev = cmd->targ;
887 	if (targ_dev && (targ_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)) {
888 		if (mpi3mr_wait_for_host_io(sc, MPI3MR_RAID_ERRREC_RESET_TIMEOUT))
889 			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);
890 		return;
891  	}
892 
893 	/* Issue task abort to recover the timed out IO */
894 	retval = mpi3mr_task_abort(cmd);
895 	if (!retval || (retval == ETIMEDOUT))
896 		return;
897 
898 	/*
899 	 * task abort has failed to recover the timed out IO,
900 	 * try with the target reset
901 	 */
902 	retval = mpi3mr_target_reset(cmd);
903 	if (!retval || (retval == ETIMEDOUT))
904 		return;
905 
906 	/*
907 	 * task abort and target reset has failed. So issue Controller reset(soft reset)
908 	 * through OCR thread context
909 	 */
910 	trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);
911 
912 	return;
913 }
914 
/**
 * int_to_lun - encode a flat LUN number into an 8-byte SCSI LUN field
 * @lun: LUN number from CAM
 * @req_lun: destination LUN field of an MPI3 request (8 bytes, e.g.
 *           the LUN member of a SCSI IO / task management request)
 *
 * Packs @lun into the big-endian, two-bytes-per-level SCSI LUN
 * representation, consuming 16 bits of @lun per level.
 */
void int_to_lun(unsigned int lun, U8 *req_lun)
{
	int i;

	/*
	 * Fix: zero the entire 8-byte SCSI LUN field.  The previous
	 * memset(req_lun, 0, sizeof(*req_lun)) cleared only ONE byte
	 * (req_lun is a U8 pointer), while the loop below fills only the
	 * first sizeof(lun) bytes, leaving the trailing levels stale.
	 */
	memset(req_lun, 0, 8);

	for (i = 0; i < sizeof(lun); i += 2) {
		req_lun[i] = (lun >> 8) & 0xFF;
		req_lun[i+1] = lun & 0xFF;
		lun = lun >> 16;
	}

}
928 
get_req_queue_index(struct mpi3mr_softc * sc)929 static U16 get_req_queue_index(struct mpi3mr_softc *sc)
930 {
931 	U16 i = 0, reply_q_index = 0, reply_q_pend_ios = 0;
932 
933 	reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[0].pend_ios);
934 	for (i = 0; i < sc->num_queues; i++) {
935 		if (reply_q_pend_ios > mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios)) {
936 			reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
937 			reply_q_index = i;
938 		}
939 	}
940 
941 	return reply_q_index;
942 }
943 
/*
 * mpi3mr_action_scsiio - Handle an XPT_SCSI_IO CCB from CAM.
 * @cam_sc: CAM softc
 * @ccb: SCSI IO CCB
 *
 * Validates adapter and target state, allocates an internal command,
 * builds the MPI3 SCSI IO request from the CCB (data direction, task
 * attribute, CDB, LUN), selects the least-busy request queue, and
 * hands the command to mpi3mr_map_request() for SGE setup and
 * submission.  Called with the adapter mutex held.  On any validation
 * failure the CCB is completed immediately via xpt_done().
 */
static void
mpi3mr_action_scsiio(struct mpi3mr_cam_softc *cam_sc, union ccb *ccb)
{
	Mpi3SCSIIORequest_t *req = NULL;
	struct ccb_scsiio *csio;
	struct mpi3mr_softc *sc;
	struct mpi3mr_target *targ;
	struct mpi3mr_cmd *cm;
	uint8_t scsi_opcode, queue_idx;
	uint32_t mpi_control;

	sc = cam_sc->sc;
	mtx_assert(&sc->mpi3mr_mtx, MA_OWNED);

	/* Controller is dead: fail the IO without touching hardware. */
	if (sc->unrecoverable) {
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < cam_sc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));

	scsi_opcode = scsiio_cdb_ptr(csio)[0];

	/*
	 * During shutdown only cache-flush and start/stop commands are
	 * allowed through; everything else is completed successfully
	 * without reaching the hardware.
	 */
	if ((sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) &&
	    !((scsi_opcode == SYNCHRONIZE_CACHE) ||
	      (scsi_opcode == START_STOP_UNIT))) {
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		xpt_done(ccb);
		return;
	}

	/* Target must exist, be visible, still present and have a handle. */
	targ = mpi3mr_find_target_by_per_id(cam_sc, csio->ccb_h.target_id);
	if (targ == NULL)  {
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x does not exist\n",
			      csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if (targ && targ->is_hidden)  {
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x is hidden\n",
			      csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if (targ->dev_removed == 1)  {
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x is removed\n", csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if (targ->dev_handle == 0x0) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s NULL handle for target 0x%x\n",
		    __func__, csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/* IOs are blocked (device quiesce or controller reset): requeue. */
	if (mpi3mr_atomic_read(&targ->block_io) ||
		(sc->reset_in_progress == 1) || (sc->prepare_for_reset == 1)) {
		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s target is busy target_id: 0x%x\n",
		    __func__, csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		xpt_done(ccb);
		return;
	}

	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPI3MRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpi3mr_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * Controller-specific quirk: UNMAP to NVMe devices behind a
	 * SAS4116 may be rejected by mpi3mr_allow_unmap_to_fw(), which
	 * completes the CCB itself when it returns false.
	 */
	if ((scsi_opcode == UNMAP) &&
		(pci_get_device(sc->mpi3mr_dev) == MPI3_MFGPAGE_DEVID_SAS4116) &&
		(targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
		(mpi3mr_allow_unmap_to_fw(sc, ccb) == false))
		return;

	/*
	 * No free internal command (or diag reset underway): freeze the
	 * simq and ask CAM to requeue the IO later.
	 */
	cm = mpi3mr_get_command(sc);
	if (cm == NULL || (sc->mpi3mr_flags & MPI3MR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpi3mr_release_command(cm);
		}
		if ((cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(cam_sc->sim, 1);
			cam_sc->flags |= MPI3MRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		xpt_done(ccb);
		return;
	}

	/* Translate CAM data direction into MPI3 request flags. */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
		cm->data_dir = MPI3MR_READ;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
		cm->data_dir = MPI3MR_WRITE;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
		break;
	}

	if (csio->cdb_len > 16)
		mpi_control |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;

	/* Build the MPI3 SCSI IO request in the command's request frame. */
	req = (Mpi3SCSIIORequest_t *)&cm->io_request;
	bzero(req, sizeof(*req));
	req->Function = MPI3_FUNCTION_SCSI_IO;
	req->HostTag = cm->hosttag;
	req->DataLength = htole32(csio->dxfer_len);
	req->DevHandle = htole16(targ->dev_handle);

	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
		break;
	}

	/* Divert write-same style IOs when the target requires it. */
	if (targ->ws_len)
		mpi3mr_divert_ws(req, csio, targ->ws_len);

	req->Flags = htole32(mpi_control);

	/* Copy the CDB from wherever CAM placed it (pointer or inline). */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}

	cm->length = csio->dxfer_len;
	cm->targ = targ;
	int_to_lun(csio->ccb_h.target_lun, req->LUN);
	cm->ccb = ccb;
	/* Timestamp for CAM IO scheduling statistics. */
	csio->ccb_h.qos.sim_data = sbinuptime();
	/* Spread IOs across the least-loaded operational queue. */
	queue_idx = get_req_queue_index(sc);
	cm->req_qidx = queue_idx;

	mpi3mr_dprint(sc, MPI3MR_TRACE, "[QID:%d]: func: %s line:%d CDB: 0x%x targetid: %x SMID: 0x%x\n",
		(queue_idx + 1), __func__, __LINE__, scsi_opcode, csio->ccb_h.target_id, cm->hosttag);

	/* Only virtual-address and BIO data buffers are supported. */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		device_printf(sc->mpi3mr_dev, "%s: physical addresses not supported\n",
		    __func__);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_INVALID);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		device_printf(sc->mpi3mr_dev, "%s: scatter gather is not supported\n",
		    __func__);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_INVALID);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	case CAM_DATA_VADDR:
	case CAM_DATA_BIO:
		/* Reject transfers larger than one SGL can describe. */
		if (csio->dxfer_len > (sc->max_sgl_entries * PAGE_SIZE)) {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_TOO_BIG);
			mpi3mr_release_command(cm);
			xpt_done(ccb);
			return;
		}
		ccb->ccb_h.status |= CAM_SIM_QUEUED;
		cm->length = csio->dxfer_len;
		if (cm->length)
			cm->data = csio->data_ptr;
		break;
	default:
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_INVALID);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	}

	/* Prepare SGEs and queue to hardware */
	mpi3mr_map_request(sc, cm);
}
1177 
1178 static void
mpi3mr_enqueue_request(struct mpi3mr_softc * sc,struct mpi3mr_cmd * cm)1179 mpi3mr_enqueue_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm)
1180 {
1181 	static int ratelimit;
1182 	struct mpi3mr_op_req_queue *opreqq = &sc->op_req_q[cm->req_qidx];
1183 	struct mpi3mr_throttle_group_info *tg = NULL;
1184 	uint32_t data_len_blks = 0;
1185 	uint32_t tracked_io_sz = 0;
1186 	uint32_t ioc_pend_data_len = 0, tg_pend_data_len = 0;
1187 	struct mpi3mr_target *targ = cm->targ;
1188 	union ccb *ccb = cm->ccb;
1189 	Mpi3SCSIIORequest_t *req = (Mpi3SCSIIORequest_t *)&cm->io_request;
1190 
1191 	if (sc->iot_enable) {
1192 		data_len_blks = ccb->csio.dxfer_len >> 9;
1193 
1194 		if ((data_len_blks >= sc->io_throttle_data_length) &&
1195 		    targ->io_throttle_enabled) {
1196 
1197 			tracked_io_sz = data_len_blks;
1198 			tg = targ->throttle_group;
1199 			if (tg) {
1200 				mpi3mr_atomic_add(&sc->pend_large_data_sz, data_len_blks);
1201 				mpi3mr_atomic_add(&tg->pend_large_data_sz, data_len_blks);
1202 
1203 				ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
1204 				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
1205 
1206 				if (ratelimit % 1000) {
1207 					mpi3mr_dprint(sc, MPI3MR_IOT,
1208 						"large vd_io persist_id(%d), handle(0x%04x), data_len(%d),"
1209 						"ioc_pending(%d), tg_pending(%d), ioc_high(%d), tg_high(%d)\n",
1210 						targ->per_id, targ->dev_handle,
1211 						data_len_blks, ioc_pend_data_len,
1212 						tg_pend_data_len, sc->io_throttle_high,
1213 						tg->high);
1214 					ratelimit++;
1215 				}
1216 
1217 				if (!tg->io_divert  && ((ioc_pend_data_len >=
1218 				    sc->io_throttle_high) ||
1219 				    (tg_pend_data_len >= tg->high))) {
1220 					tg->io_divert = 1;
1221 					mpi3mr_dprint(sc, MPI3MR_IOT,
1222 						"VD: Setting divert flag for tg_id(%d), persist_id(%d)\n",
1223 						tg->id, targ->per_id);
1224 					if (sc->mpi3mr_debug & MPI3MR_IOT)
1225 						mpi3mr_print_cdb(ccb);
1226 					mpi3mr_set_io_divert_for_all_vd_in_tg(sc,
1227 					    tg, 1);
1228 				}
1229 			} else {
1230 				mpi3mr_atomic_add(&sc->pend_large_data_sz, data_len_blks);
1231 				ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
1232 				if (ratelimit % 1000) {
1233 					mpi3mr_dprint(sc, MPI3MR_IOT,
1234 					    "large pd_io persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_high(%d)\n",
1235 					    targ->per_id, targ->dev_handle,
1236 					    data_len_blks, ioc_pend_data_len,
1237 					    sc->io_throttle_high);
1238 					ratelimit++;
1239 				}
1240 
1241 				if (ioc_pend_data_len >= sc->io_throttle_high) {
1242 					targ->io_divert = 1;
1243 					mpi3mr_dprint(sc, MPI3MR_IOT,
1244 						"PD: Setting divert flag for persist_id(%d)\n",
1245 						targ->per_id);
1246 					if (sc->mpi3mr_debug & MPI3MR_IOT)
1247 						mpi3mr_print_cdb(ccb);
1248 				}
1249 			}
1250 		}
1251 
1252 		if (targ->io_divert) {
1253 			req->MsgFlags |= MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
1254 			req->Flags = htole32(le32toh(req->Flags) | MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING);
1255 		}
1256 	}
1257 
1258 	if (mpi3mr_submit_io(sc, opreqq, (U8 *)&cm->io_request)) {
1259 		if (tracked_io_sz) {
1260 			mpi3mr_atomic_sub(&sc->pend_large_data_sz, tracked_io_sz);
1261 			if (tg)
1262 				mpi3mr_atomic_sub(&tg->pend_large_data_sz, tracked_io_sz);
1263 		}
1264 		mpi3mr_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
1265 		mpi3mr_release_command(cm);
1266 		xpt_done(ccb);
1267 	} else {
1268 		callout_reset_sbt(&cm->callout, mstosbt(ccb->ccb_h.timeout), 0,
1269 		    mpi3mr_scsiio_timeout, cm, 0);
1270 		cm->callout_owner = true;
1271 		mpi3mr_atomic_inc(&sc->fw_outstanding);
1272 		mpi3mr_atomic_inc(&targ->outstanding);
1273 		if (mpi3mr_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
1274 			sc->io_cmds_highwater++;
1275 	}
1276 
1277 	return;
1278 }
1279 
1280 static void
mpi3mr_cam_poll(struct cam_sim * sim)1281 mpi3mr_cam_poll(struct cam_sim *sim)
1282 {
1283 	struct mpi3mr_cam_softc *cam_sc;
1284 	struct mpi3mr_irq_context *irq_ctx;
1285 	struct mpi3mr_softc *sc;
1286 	int i;
1287 
1288 	cam_sc = cam_sim_softc(sim);
1289 	sc = cam_sc->sc;
1290 
1291 	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "func: %s line: %d is called\n",
1292 		__func__, __LINE__);
1293 
1294 	for (i = 0; i < sc->num_queues; i++) {
1295 		irq_ctx = sc->irq_ctx + i;
1296 		if (irq_ctx->op_reply_q->qid) {
1297 			mpi3mr_complete_io_cmd(sc, irq_ctx);
1298 		}
1299 	}
1300 }
1301 
/*
 * mpi3mr_cam_action - CAM SIM action entry point.
 * @sim: CAM SIM handle
 * @ccb: CCB describing the requested action
 *
 * Dispatches CAM function codes: fills in path inquiry and transport
 * settings, forwards XPT_SCSI_IO to mpi3mr_action_scsiio(), fakes
 * success for abort/reset requests, and rejects everything else.
 * Called with the adapter mutex held.  Cases that "break" fall
 * through to the common xpt_done() at the bottom; cases that "return"
 * complete (or defer) the CCB themselves.
 */
static void
mpi3mr_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpi3mr_cam_softc *cam_sc;
	struct mpi3mr_target *targ;
	struct mpi3mr_softc *sc;

	cam_sc = cam_sim_softc(sim);
	sc = cam_sc->sc;

	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "ccb func_code 0x%x target id: 0x%x\n",
	    ccb->ccb_h.func_code, ccb->ccb_h.target_id);

	mtx_assert(&cam_sc->sc->mpi3mr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		/* Describe the SIM's capabilities to CAM. */
		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = cam_sc->maxtargets - 1;
		cpi->max_lun = 0;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = cam_sc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Broadcom", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		targ = mpi3mr_find_target_by_per_id(cam_sc, ccb->ccb_h.target_id);

		/*
		 * NVMe devices cap the max IO at the drive's MDTS; all
		 * other devices are limited by the adapter's SGL depth.
		 */
		if (targ && (targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
		    ((targ->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)) {
			cpi->maxio = targ->dev_spec.pcie_inf.mdts;
			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
				"PCI device target_id: %u max io size: %u\n",
				ccb->ccb_h.target_id, cpi->maxio);
		} else {
			cpi->maxio = PAGE_SIZE * (sc->max_sgl_entries - 1);
		}
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < cam_sc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = mpi3mr_find_target_by_per_id(cam_sc, cts->ccb_h.target_id);

		if (targ == NULL) {
			mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "Device with target ID: 0x%x does not exist\n",
			cts->ccb_h.target_id);
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		if ((targ->dev_handle == 0x0) || (targ->dev_removed == 1))  {
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;

		/* Map the firmware link-rate code to a bitrate in kb/s. */
		switch (targ->link_rate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/* Note: returns without xpt_done() — CCB left untouched. */
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action "
		    "XPT_RESET_DEV\n");
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action faking success "
		    "for abort or reset\n");
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* mpi3mr_action_scsiio() completes the CCB itself. */
		mpi3mr_action_scsiio(cam_sc, ccb);
		return;
	default:
		mpi3mr_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);
}
1447 
1448 void
mpi3mr_startup_increment(struct mpi3mr_cam_softc * cam_sc)1449 mpi3mr_startup_increment(struct mpi3mr_cam_softc *cam_sc)
1450 {
1451 	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
1452 		if (cam_sc->startup_refcount++ == 0) {
1453 			/* just starting, freeze the simq */
1454 			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
1455 			    "%s freezing simq\n", __func__);
1456 			xpt_hold_boot();
1457 		}
1458 		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
1459 		    cam_sc->startup_refcount);
1460 	}
1461 }
1462 
1463 void
mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc * cam_sc)1464 mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc)
1465 {
1466 	if (cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) {
1467 		cam_sc->flags &= ~MPI3MRSAS_QUEUE_FROZEN;
1468 		xpt_release_simq(cam_sc->sim, 1);
1469 		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "Unfreezing SIM queue\n");
1470 	}
1471 }
1472 
1473 void
mpi3mr_rescan_target(struct mpi3mr_softc * sc,struct mpi3mr_target * targ)1474 mpi3mr_rescan_target(struct mpi3mr_softc *sc, struct mpi3mr_target *targ)
1475 {
1476 	struct mpi3mr_cam_softc *cam_sc = sc->cam_sc;
1477 	path_id_t pathid;
1478 	target_id_t targetid;
1479 	union ccb *ccb;
1480 
1481 	pathid = cam_sim_path(cam_sc->sim);
1482 	if (targ == NULL)
1483 		targetid = CAM_TARGET_WILDCARD;
1484 	else
1485 		targetid = targ->per_id;
1486 
1487 	/*
1488 	 * Allocate a CCB and schedule a rescan.
1489 	 */
1490 	ccb = xpt_alloc_ccb_nowait();
1491 	if (ccb == NULL) {
1492 		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to alloc CCB for rescan\n");
1493 		return;
1494 	}
1495 
1496 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
1497 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1498 		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to create path for rescan\n");
1499 		xpt_free_ccb(ccb);
1500 		return;
1501 	}
1502 
1503 	if (targetid == CAM_TARGET_WILDCARD)
1504 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
1505 	else
1506 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
1507 
1508 	mpi3mr_dprint(sc, MPI3MR_EVENT, "%s target id 0x%x\n", __func__, targetid);
1509 	xpt_rescan(ccb);
1510 }
1511 
1512 void
mpi3mr_startup_decrement(struct mpi3mr_cam_softc * cam_sc)1513 mpi3mr_startup_decrement(struct mpi3mr_cam_softc *cam_sc)
1514 {
1515 	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
1516 		if (--cam_sc->startup_refcount == 0) {
1517 			/* finished all discovery-related actions, release
1518 			 * the simq and rescan for the latest topology.
1519 			 */
1520 			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
1521 			    "%s releasing simq\n", __func__);
1522 			cam_sc->flags &= ~MPI3MRSAS_IN_STARTUP;
1523 			xpt_release_simq(cam_sc->sim, 1);
1524 			xpt_release_boot();
1525 		}
1526 		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
1527 		    cam_sc->startup_refcount);
1528 	}
1529 }
1530 
1531 static void
mpi3mr_fw_event_free(struct mpi3mr_softc * sc,struct mpi3mr_fw_event_work * fw_event)1532 mpi3mr_fw_event_free(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
1533 {
1534 	if (!fw_event)
1535 		return;
1536 
1537 	if (fw_event->event_data != NULL) {
1538 		free(fw_event->event_data, M_MPI3MR);
1539 		fw_event->event_data = NULL;
1540 	}
1541 
1542 	free(fw_event, M_MPI3MR);
1543 	fw_event = NULL;
1544 }
1545 
1546 static void
mpi3mr_freeup_events(struct mpi3mr_softc * sc)1547 mpi3mr_freeup_events(struct mpi3mr_softc *sc)
1548 {
1549 	struct mpi3mr_fw_event_work *fw_event = NULL;
1550 	mtx_lock(&sc->mpi3mr_mtx);
1551 	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
1552 		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
1553 		mpi3mr_fw_event_free(sc, fw_event);
1554 	}
1555 	mtx_unlock(&sc->mpi3mr_mtx);
1556 }
1557 
1558 static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_softc * sc,Mpi3EventDataSasTopologyChangeList_t * event_data)1559 mpi3mr_sastopochg_evt_debug(struct mpi3mr_softc *sc,
1560 	Mpi3EventDataSasTopologyChangeList_t *event_data)
1561 {
1562 	int i;
1563 	U16 handle;
1564 	U8 reason_code, phy_number;
1565 	char *status_str = NULL;
1566 	U8 link_rate, prev_link_rate;
1567 
1568 	switch (event_data->ExpStatus) {
1569 	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
1570 		status_str = "remove";
1571 		break;
1572 	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
1573 		status_str =  "responding";
1574 		break;
1575 	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
1576 		status_str = "remove delay";
1577 		break;
1578 	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
1579 		status_str = "direct attached";
1580 		break;
1581 	default:
1582 		status_str = "unknown status";
1583 		break;
1584 	}
1585 
1586 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :sas topology change: (%s)\n",
1587 	    __func__, status_str);
1588 	mpi3mr_dprint(sc, MPI3MR_INFO,
1589 		"%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) "
1590 	    "start_phy(%02d), num_entries(%d)\n", __func__,
1591 	    (event_data->ExpanderDevHandle),
1592 	    (event_data->EnclosureHandle),
1593 	    event_data->StartPhyNum, event_data->NumEntries);
1594 	for (i = 0; i < event_data->NumEntries; i++) {
1595 		handle = (event_data->PhyEntry[i].AttachedDevHandle);
1596 		if (!handle)
1597 			continue;
1598 		phy_number = event_data->StartPhyNum + i;
1599 		reason_code = event_data->PhyEntry[i].PhyStatus &
1600 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1601 		switch (reason_code) {
1602 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1603 			status_str = "target remove";
1604 			break;
1605 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
1606 			status_str = "delay target remove";
1607 			break;
1608 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1609 			status_str = "link rate change";
1610 			break;
1611 		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
1612 			status_str = "target responding";
1613 			break;
1614 		default:
1615 			status_str = "unknown";
1616 			break;
1617 		}
1618 		link_rate = event_data->PhyEntry[i].LinkRate >> 4;
1619 		prev_link_rate = event_data->PhyEntry[i].LinkRate & 0xF;
1620 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :\tphy(%02d), attached_handle(0x%04x): %s:"
1621 		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
1622 		    phy_number, handle, status_str, link_rate, prev_link_rate);
1623 	}
1624 }
1625 
1626 static void
mpi3mr_process_sastopochg_evt(struct mpi3mr_softc * sc,struct mpi3mr_fw_event_work * fwevt)1627 mpi3mr_process_sastopochg_evt(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fwevt)
1628 {
1629 
1630 	Mpi3EventDataSasTopologyChangeList_t *event_data =
1631 		    (Mpi3EventDataSasTopologyChangeList_t *)fwevt->event_data;
1632 	int i;
1633 	U16 handle;
1634 	U8 reason_code, link_rate;
1635 	struct mpi3mr_target *target = NULL;
1636 
1637 
1638 	mpi3mr_sastopochg_evt_debug(sc, event_data);
1639 
1640 	for (i = 0; i < event_data->NumEntries; i++) {
1641 		handle = le16toh(event_data->PhyEntry[i].AttachedDevHandle);
1642 		link_rate = event_data->PhyEntry[i].LinkRate >> 4;
1643 
1644 		if (!handle)
1645 			continue;
1646 		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1647 
1648 		if (!target)
1649 			continue;
1650 
1651 		target->link_rate = link_rate;
1652 		reason_code = event_data->PhyEntry[i].PhyStatus &
1653 			MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1654 
1655 		switch (reason_code) {
1656 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1657 			if (target->exposed_to_os)
1658 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
1659 			mpi3mr_remove_device_from_list(sc, target, false);
1660 			break;
1661 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1662 			break;
1663 		default:
1664 			break;
1665 		}
1666 	}
1667 
1668 	/*
1669 	 * refcount was incremented for this event in
1670 	 * mpi3mr_evt_handler. Decrement it here because the event has
1671 	 * been processed.
1672 	 */
1673 	mpi3mr_startup_decrement(sc->cam_sc);
1674 	return;
1675 }
1676 
1677 static void
mpi3mr_pcietopochg_evt_debug(struct mpi3mr_softc * sc,Mpi3EventDataPcieTopologyChangeList_t * event_data)1678 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_softc *sc,
1679 	Mpi3EventDataPcieTopologyChangeList_t *event_data)
1680 {
1681 	int i;
1682 	U16 handle;
1683 	U16 reason_code;
1684 	U8 port_number;
1685 	char *status_str = NULL;
1686 	U8 link_rate, prev_link_rate;
1687 
1688 	switch (event_data->SwitchStatus) {
1689 	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
1690 		status_str = "remove";
1691 		break;
1692 	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
1693 		status_str =  "responding";
1694 		break;
1695 	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
1696 		status_str = "remove delay";
1697 		break;
1698 	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
1699 		status_str = "direct attached";
1700 		break;
1701 	default:
1702 		status_str = "unknown status";
1703 		break;
1704 	}
1705 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :pcie topology change: (%s)\n",
1706 		__func__, status_str);
1707 	mpi3mr_dprint(sc, MPI3MR_INFO,
1708 		"%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
1709 		"start_port(%02d), num_entries(%d)\n", __func__,
1710 		le16toh(event_data->SwitchDevHandle),
1711 		le16toh(event_data->EnclosureHandle),
1712 		event_data->StartPortNum, event_data->NumEntries);
1713 	for (i = 0; i < event_data->NumEntries; i++) {
1714 		handle =
1715 			le16toh(event_data->PortEntry[i].AttachedDevHandle);
1716 		if (!handle)
1717 			continue;
1718 		port_number = event_data->StartPortNum + i;
1719 		reason_code = event_data->PortEntry[i].PortStatus;
1720 		switch (reason_code) {
1721 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1722 			status_str = "target remove";
1723 			break;
1724 		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
1725 			status_str = "delay target remove";
1726 			break;
1727 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1728 			status_str = "link rate change";
1729 			break;
1730 		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
1731 			status_str = "target responding";
1732 			break;
1733 		default:
1734 			status_str = "unknown";
1735 			break;
1736 		}
1737 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
1738 			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1739 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
1740 			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1741 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :\tport(%02d), attached_handle(0x%04x): %s:"
1742 		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
1743 		    port_number, handle, status_str, link_rate, prev_link_rate);
1744 	}
1745 }
1746 
mpi3mr_process_pcietopochg_evt(struct mpi3mr_softc * sc,struct mpi3mr_fw_event_work * fwevt)1747 static void mpi3mr_process_pcietopochg_evt(struct mpi3mr_softc *sc,
1748     struct mpi3mr_fw_event_work *fwevt)
1749 {
1750 	Mpi3EventDataPcieTopologyChangeList_t *event_data =
1751 		    (Mpi3EventDataPcieTopologyChangeList_t *)fwevt->event_data;
1752 	int i;
1753 	U16 handle;
1754 	U8 reason_code, link_rate;
1755 	struct mpi3mr_target *target = NULL;
1756 
1757 
1758 	mpi3mr_pcietopochg_evt_debug(sc, event_data);
1759 
1760 	for (i = 0; i < event_data->NumEntries; i++) {
1761 		handle =
1762 			le16toh(event_data->PortEntry[i].AttachedDevHandle);
1763 		if (!handle)
1764 			continue;
1765 		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1766 		if (!target)
1767 			continue;
1768 
1769 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
1770 			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1771 		target->link_rate = link_rate;
1772 
1773 		reason_code = event_data->PortEntry[i].PortStatus;
1774 
1775 		switch (reason_code) {
1776 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1777 			if (target->exposed_to_os)
1778 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
1779 			mpi3mr_remove_device_from_list(sc, target, false);
1780 			break;
1781 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1782 			break;
1783 		default:
1784 			break;
1785 		}
1786 	}
1787 
1788 	/*
1789 	 * refcount was incremented for this event in
1790 	 * mpi3mr_evt_handler. Decrement it here because the event has
1791 	 * been processed.
1792 	 */
1793 	mpi3mr_startup_decrement(sc->cam_sc);
1794 	return;
1795 }
1796 
/*
 * mpi3mr_add_device - Expose a discovered device to CAM.
 * @sc: Adapter softc
 * @per_id: Persistent ID of the device to expose
 *
 * Looks the device up in the driver's target list and, unless it is
 * hidden or a controller reset is in progress, schedules a CAM rescan
 * so the upper layers attach to it.  Holds a startup discovery
 * reference across the operation.
 */
void mpi3mr_add_device(struct mpi3mr_softc *sc, U16 per_id)
{
	struct mpi3mr_target *target;

	mpi3mr_dprint(sc, MPI3MR_EVENT,
		"Adding device(persistent id: 0x%x)\n", per_id);

	mpi3mr_startup_increment(sc->cam_sc);
	target = mpi3mr_find_target_by_per_id(sc->cam_sc, per_id);

	if (!target) {
		/*
		 * Fixed log message: the adjacent string constants
		 * previously ran together as "driver'sinternal".
		 */
		mpi3mr_dprint(sc, MPI3MR_INFO, "Not available in driver's "
		    "internal target list, persistent_id: %d\n",
		    per_id);
		goto out;
	}

	if (target->is_hidden) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Target is hidden, persistent_id: %d\n",
			per_id);
		goto out;
	}

	if (!target->exposed_to_os && !sc->reset_in_progress) {
		mpi3mr_rescan_target(sc, target);
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"Added device persistent_id: %d dev_handle: %d\n", per_id, target->dev_handle);
		target->exposed_to_os = 1;
	}

out:
	mpi3mr_startup_decrement(sc->cam_sc);
}
1830 
/*
 * mpi3mr_remove_device_from_os - Detach a device from CAM.
 * @sc: Adapter softc
 * @handle: Firmware device handle of the device being removed
 *
 * Marks the target as in-removal, polls the reply queues once if IOs
 * are still outstanding, and schedules a CAM rescan so the peripheral
 * is torn down.
 *
 * Return: 0 on success, -1 if the handle is no longer in the driver's
 * target list (a wildcard rescan is scheduled in that case).
 */
int mpi3mr_remove_device_from_os(struct mpi3mr_softc *sc, U16 handle)
{
	int retval = 0;
	struct mpi3mr_target *target;
	unsigned int target_outstanding;

	mpi3mr_dprint(sc, MPI3MR_EVENT,
		"Removing Device (dev_handle: %d)\n", handle);

	target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);

	if (!target) {
		/*
		 * 'target' is NULL here; the old message dereferenced
		 * target->per_id — a guaranteed NULL pointer
		 * dereference.  Report only the handle.
		 */
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"Device (dev_handle: %d) is already removed from driver's list\n",
			handle);
		mpi3mr_rescan_target(sc, NULL);
		retval = -1;
		goto out;
	}

	target->flags |= MPI3MRSAS_TARGET_INREMOVAL;

	target_outstanding = mpi3mr_atomic_read(&target->outstanding);
	if (target_outstanding) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "there are [%2d] outstanding IOs on target: %d "
			      "Poll reply queue once\n", target_outstanding, target->per_id);
		mpi3mr_poll_pend_io_completions(sc);
		/*
		 * Re-read after the poll and complain only if IOs are
		 * still pending.  The old unbraced "if" re-read the
		 * counter but printed the "despite poll" error
		 * unconditionally (misleading indentation).
		 */
		target_outstanding = mpi3mr_atomic_read(&target->outstanding);
		if (target_outstanding)
			mpi3mr_dprint(sc, MPI3MR_ERROR, "[%2d] outstanding IOs present on target: %d "
				      "despite poll\n", target_outstanding, target->per_id);
	}

	if (target->exposed_to_os && !sc->reset_in_progress) {
		mpi3mr_rescan_target(sc, target);
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"Removed device(persistent_id: %d dev_handle: %d)\n", target->per_id, handle);
		target->exposed_to_os = 0;
	}

	target->flags &= ~MPI3MRSAS_TARGET_INREMOVAL;
out:
	return retval;
}
1876 
/**
 * mpi3mr_remove_device_from_list - Drop a target from the driver's list
 * @sc: Adapter instance reference
 * @target: Target entry to unlink and free
 * @must_delete: Force removal regardless of the target's state
 *
 * Unlinks @target from the per-adapter target list and frees it.
 * Unless @must_delete is set, only targets whose device-removal
 * handshake has completed are removed.
 */
void mpi3mr_remove_device_from_list(struct mpi3mr_softc *sc,
	struct mpi3mr_target *target, bool must_delete)
{
	if (!must_delete && target->state != MPI3MR_DEV_REMOVE_HS_COMPLETED)
		return;

	/* Unlink under the spin lock; free once the lock is dropped. */
	mtx_lock_spin(&sc->target_lock);
	TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
	mtx_unlock_spin(&sc->target_lock);

	free(target, M_MPI3MR);
}
1893 
1894 /**
1895  * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
1896  * @sc: Adapter instance reference
1897  * @fwevt: Firmware event
1898  *
1899  * Process Device Status Change event and based on device's new
1900  * information, either expose the device to the upper layers, or
1901  * remove the device from upper layers.
1902  *
1903  * Return: Nothing.
1904  */
mpi3mr_devstatuschg_evt_bh(struct mpi3mr_softc * sc,struct mpi3mr_fw_event_work * fwevt)1905 static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_softc *sc,
1906 	struct mpi3mr_fw_event_work *fwevt)
1907 {
1908 	U16 dev_handle = 0;
1909 	U8 uhide = 0, delete = 0, cleanup = 0;
1910 	struct mpi3mr_target *tgtdev = NULL;
1911 	Mpi3EventDataDeviceStatusChange_t *evtdata =
1912 	    (Mpi3EventDataDeviceStatusChange_t *)fwevt->event_data;
1913 
1914 
1915 
1916 	dev_handle = le16toh(evtdata->DevHandle);
1917 	mpi3mr_dprint(sc, MPI3MR_INFO,
1918 	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
1919 	    __func__, dev_handle, evtdata->ReasonCode);
1920 	switch (evtdata->ReasonCode) {
1921 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
1922 		delete = 1;
1923 		break;
1924 	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
1925 		uhide = 1;
1926 		break;
1927 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
1928 		delete = 1;
1929 		cleanup = 1;
1930 		break;
1931 	default:
1932 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Unhandled reason code(0x%x)\n", __func__,
1933 		    evtdata->ReasonCode);
1934 		break;
1935 	}
1936 
1937 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
1938 	if (!tgtdev)
1939 		return;
1940 
1941 	if (uhide) {
1942 		if (!tgtdev->exposed_to_os)
1943 			mpi3mr_add_device(sc, tgtdev->per_id);
1944 	}
1945 
1946 	if (delete)
1947 		mpi3mr_remove_device_from_os(sc, dev_handle);
1948 
1949 	if (cleanup)
1950 		mpi3mr_remove_device_from_list(sc, tgtdev, false);
1951 }
1952 
1953 /**
1954  * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
1955  * @sc: Adapter instance reference
1956  * @dev_pg0: New device page0
1957  *
1958  * Process Device Info Change event and based on device's new
1959  * information, either expose the device to the upper layers, or
1960  * remove the device from upper layers or update the details of
1961  * the device.
1962  *
1963  * Return: Nothing.
1964  */
mpi3mr_devinfochg_evt_bh(struct mpi3mr_softc * sc,Mpi3DevicePage0_t * dev_pg0)1965 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_softc *sc,
1966 	Mpi3DevicePage0_t *dev_pg0)
1967 {
1968 	struct mpi3mr_target *tgtdev = NULL;
1969 	U16 dev_handle = 0, perst_id = 0;
1970 
1971 	perst_id = le16toh(dev_pg0->PersistentID);
1972 	dev_handle = le16toh(dev_pg0->DevHandle);
1973 	mpi3mr_dprint(sc, MPI3MR_INFO,
1974 	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
1975 	    __func__, dev_handle, perst_id);
1976 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
1977 	if (!tgtdev)
1978 		return;
1979 
1980 	mpi3mr_update_device(sc, tgtdev, dev_pg0, false);
1981 	if (!tgtdev->is_hidden && !tgtdev->exposed_to_os)
1982 		mpi3mr_add_device(sc, perst_id);
1983 
1984 	if (tgtdev->is_hidden && tgtdev->exposed_to_os)
1985 		mpi3mr_remove_device_from_os(sc, tgtdev->dev_handle);
1986 }
1987 
1988 static void
mpi3mr_fw_work(struct mpi3mr_softc * sc,struct mpi3mr_fw_event_work * fw_event)1989 mpi3mr_fw_work(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
1990 {
1991 	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
1992 		goto out;
1993 
1994 	if (!fw_event->process_event)
1995 		goto evt_ack;
1996 
1997 	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Working on  Event: [%x]\n",
1998 	    event_count++, __func__, fw_event->event);
1999 
2000 	switch (fw_event->event) {
2001 	case MPI3_EVENT_DEVICE_ADDED:
2002 	{
2003 		Mpi3DevicePage0_t *dev_pg0 =
2004 			(Mpi3DevicePage0_t *) fw_event->event_data;
2005 		mpi3mr_add_device(sc, dev_pg0->PersistentID);
2006 		break;
2007 	}
2008 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
2009 	{
2010 		mpi3mr_devinfochg_evt_bh(sc,
2011 		    (Mpi3DevicePage0_t *) fw_event->event_data);
2012 		break;
2013 	}
2014 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
2015 	{
2016 		mpi3mr_devstatuschg_evt_bh(sc, fw_event);
2017 		break;
2018 	}
2019 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
2020 	{
2021 		mpi3mr_process_sastopochg_evt(sc, fw_event);
2022 		break;
2023 	}
2024 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
2025 	{
2026 		mpi3mr_process_pcietopochg_evt(sc, fw_event);
2027 		break;
2028 	}
2029 	default:
2030 		mpi3mr_dprint(sc, MPI3MR_TRACE,"Unhandled event 0x%0X\n",
2031 		    fw_event->event);
2032 		break;
2033 
2034 	}
2035 
2036 evt_ack:
2037 	if (fw_event->send_ack) {
2038 		mpi3mr_dprint(sc, MPI3MR_EVENT,"Process event ACK for event 0x%0X\n",
2039 		    fw_event->event);
2040 		mpi3mr_process_event_ack(sc, fw_event->event,
2041 		    fw_event->event_context);
2042 	}
2043 
2044 out:
2045 	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Event Free: [%x]\n", event_count,
2046 	    __func__, fw_event->event);
2047 
2048 	mpi3mr_fw_event_free(sc, fw_event);
2049 }
2050 
2051 void
mpi3mr_firmware_event_work(void * arg,int pending)2052 mpi3mr_firmware_event_work(void *arg, int pending)
2053 {
2054 	struct mpi3mr_fw_event_work *fw_event;
2055 	struct mpi3mr_softc *sc;
2056 
2057 	sc = (struct mpi3mr_softc *)arg;
2058 
2059 	mtx_lock(&sc->fwevt_lock);
2060 	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
2061 		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
2062 		mtx_unlock(&sc->fwevt_lock);
2063 		mpi3mr_fw_work(sc, fw_event);
2064 		mtx_lock(&sc->fwevt_lock);
2065 	}
2066 	mtx_unlock(&sc->fwevt_lock);
2067 }
2068 
2069 
2070 /*
2071  * mpi3mr_cam_attach - CAM layer registration
2072  * @sc: Adapter reference
2073  *
2074  * This function does simq allocation, cam registration, xpt_bus registration,
2075  * event taskqueue initialization and async event handler registration.
2076  *
2077  * Return: 0 on success and proper error codes on failure
2078  */
2079 int
mpi3mr_cam_attach(struct mpi3mr_softc * sc)2080 mpi3mr_cam_attach(struct mpi3mr_softc *sc)
2081 {
2082 	struct mpi3mr_cam_softc *cam_sc;
2083 	cam_status status;
2084 	int unit, error = 0, reqs;
2085 
2086 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Starting CAM Attach\n");
2087 
2088 	cam_sc = malloc(sizeof(struct mpi3mr_cam_softc), M_MPI3MR, M_WAITOK|M_ZERO);
2089 	cam_sc->maxtargets = sc->facts.max_perids + 1;
2090 
2091 	TAILQ_INIT(&cam_sc->tgt_list);
2092 
2093 	sc->cam_sc = cam_sc;
2094 	cam_sc->sc = sc;
2095 
2096 	reqs = sc->max_host_ios;
2097 
2098 	if ((cam_sc->devq = cam_simq_alloc(reqs)) == NULL) {
2099 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIMQ\n");
2100 		error = ENOMEM;
2101 		goto out;
2102 	}
2103 
2104 	unit = device_get_unit(sc->mpi3mr_dev);
2105 	cam_sc->sim = cam_sim_alloc(mpi3mr_cam_action, mpi3mr_cam_poll, "mpi3mr", cam_sc,
2106 	    unit, &sc->mpi3mr_mtx, reqs, reqs, cam_sc->devq);
2107 	if (cam_sc->sim == NULL) {
2108 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIM\n");
2109 		error = EINVAL;
2110 		goto out;
2111 	}
2112 
2113 	TAILQ_INIT(&cam_sc->ev_queue);
2114 
2115 	/* Initialize taskqueue for Event Handling */
2116 	TASK_INIT(&cam_sc->ev_task, 0, mpi3mr_firmware_event_work, sc);
2117 	cam_sc->ev_tq = taskqueue_create("mpi3mr_taskq", M_NOWAIT | M_ZERO,
2118 	    taskqueue_thread_enqueue, &cam_sc->ev_tq);
2119 	taskqueue_start_threads(&cam_sc->ev_tq, 1, PRIBIO, "%s taskq",
2120 	    device_get_nameunit(sc->mpi3mr_dev));
2121 
2122 	mtx_lock(&sc->mpi3mr_mtx);
2123 
2124 	/*
2125 	 * XXX There should be a bus for every port on the adapter, but since
2126 	 * we're just going to fake the topology for now, we'll pretend that
2127 	 * everything is just a target on a single bus.
2128 	 */
2129 	if ((error = xpt_bus_register(cam_sc->sim, sc->mpi3mr_dev, 0)) != 0) {
2130 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2131 		    "Error 0x%x registering SCSI bus\n", error);
2132 		mtx_unlock(&sc->mpi3mr_mtx);
2133 		goto out;
2134 	}
2135 
2136 	/*
2137 	 * Assume that discovery events will start right away.
2138 	 *
2139 	 * Hold off boot until discovery is complete.
2140 	 */
2141 	cam_sc->flags |= MPI3MRSAS_IN_STARTUP | MPI3MRSAS_IN_DISCOVERY;
2142 	sc->cam_sc->startup_refcount = 0;
2143 	mpi3mr_startup_increment(cam_sc);
2144 
2145 	callout_init(&cam_sc->discovery_callout, 1 /*mpsafe*/);
2146 
2147 	/*
2148 	 * Register for async events so we can determine the EEDP
2149 	 * capabilities of devices.
2150 	 */
2151 	status = xpt_create_path(&cam_sc->path, /*periph*/NULL,
2152 	    cam_sim_path(sc->cam_sc->sim), CAM_TARGET_WILDCARD,
2153 	    CAM_LUN_WILDCARD);
2154 	if (status != CAM_REQ_CMP) {
2155 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2156 		    "Error 0x%x creating sim path\n", status);
2157 		cam_sc->path = NULL;
2158 	}
2159 
2160 	if (status != CAM_REQ_CMP) {
2161 		/*
2162 		 * EEDP use is the exception, not the rule.
2163 		 * Warn the user, but do not fail to attach.
2164 		 */
2165 		mpi3mr_dprint(sc, MPI3MR_INFO, "EEDP capabilities disabled.\n");
2166 	}
2167 
2168 	mtx_unlock(&sc->mpi3mr_mtx);
2169 
2170 	error = mpi3mr_register_events(sc);
2171 
2172 out:
2173 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s Exiting CAM attach, error: 0x%x n", __func__, error);
2174 	return (error);
2175 }
2176 
2177 int
mpi3mr_cam_detach(struct mpi3mr_softc * sc)2178 mpi3mr_cam_detach(struct mpi3mr_softc *sc)
2179 {
2180 	struct mpi3mr_cam_softc *cam_sc;
2181 	struct mpi3mr_target *target;
2182 
2183 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Starting CAM detach\n", __func__);
2184 	if (sc->cam_sc == NULL)
2185 		return (0);
2186 
2187 	cam_sc = sc->cam_sc;
2188 
2189 	mpi3mr_freeup_events(sc);
2190 
2191 	/*
2192 	 * Drain and free the event handling taskqueue with the lock
2193 	 * unheld so that any parallel processing tasks drain properly
2194 	 * without deadlocking.
2195 	 */
2196 	if (cam_sc->ev_tq != NULL)
2197 		taskqueue_free(cam_sc->ev_tq);
2198 
2199 	mtx_lock(&sc->mpi3mr_mtx);
2200 
2201 	while (cam_sc->startup_refcount != 0)
2202 		mpi3mr_startup_decrement(cam_sc);
2203 
2204 	/* Deregister our async handler */
2205 	if (cam_sc->path != NULL) {
2206 		xpt_free_path(cam_sc->path);
2207 		cam_sc->path = NULL;
2208 	}
2209 
2210 	if (cam_sc->flags & MPI3MRSAS_IN_STARTUP)
2211 		xpt_release_simq(cam_sc->sim, 1);
2212 
2213 	if (cam_sc->sim != NULL) {
2214 		xpt_bus_deregister(cam_sim_path(cam_sc->sim));
2215 		cam_sim_free(cam_sc->sim, FALSE);
2216 	}
2217 
2218 	mtx_unlock(&sc->mpi3mr_mtx);
2219 
2220 	if (cam_sc->devq != NULL)
2221 		cam_simq_free(cam_sc->devq);
2222 
2223 get_target:
2224 	mtx_lock_spin(&sc->target_lock);
2225  	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
2226  		TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
2227 		mtx_unlock_spin(&sc->target_lock);
2228 		goto out_tgt_free;
2229 	}
2230 	mtx_unlock_spin(&sc->target_lock);
2231 out_tgt_free:
2232 	if (target) {
2233 		free(target, M_MPI3MR);
2234 		target = NULL;
2235 		goto get_target;
2236  	}
2237 
2238 	free(cam_sc, M_MPI3MR);
2239 	sc->cam_sc = NULL;
2240 
2241 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Exiting CAM detach\n", __func__);
2242 	return (0);
2243 }
2244