xref: /linux/drivers/scsi/mpi3mr/mpi3mr_app.c (revision 2e3fcbcc3b0eb9b96d2912cdac920f0ae8d1c8f2)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2023 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9 
10 #include "mpi3mr.h"
11 #include <linux/bsg-lib.h>
12 #include <uapi/scsi/scsi_bsg_mpi3mr.h>
13 
14 /**
15  * mpi3mr_alloc_trace_buffer - Allocate segmented trace buffer
16  * @mrioc: Adapter instance reference
17  * @trace_size: Trace buffer size
18  *
19  * Allocate either a segmented memory pool or a contiguous
20  * buffer, based on the controller capability, for the host
21  * trace buffer.
22  *
23  * Return: 0 on success, non-zero on failure.
24  */
25 static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size)
26 {
27 	struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0];
28 	int i, sz;
29 	u64 *diag_buffer_list = NULL;
30 	dma_addr_t diag_buffer_list_dma;
31 	u32 seg_count;
32 
33 	if (mrioc->seg_tb_support) {
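		/*
		 * Segmented mode: round the requested size down to a whole
		 * number of 4K segments; the controller is handed a list of
		 * per-segment DMA addresses instead of one contiguous buffer.
		 */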
34 		seg_count = trace_size / MPI3MR_PAGE_SIZE_4K;
35 		trace_size = seg_count * MPI3MR_PAGE_SIZE_4K;
36 
37 		diag_buffer_list = dma_alloc_coherent(&mrioc->pdev->dev,
38 				sizeof(u64) * seg_count,
39 				&diag_buffer_list_dma, GFP_KERNEL);
40 		if (!diag_buffer_list)
41 			return -1;
42 
43 		mrioc->num_tb_segs = seg_count;
44 
45 		sz = sizeof(struct segments) * seg_count;
46 		mrioc->trace_buf = kzalloc(sz, GFP_KERNEL);
47 		if (!mrioc->trace_buf)
48 			goto trace_buf_failed;
49 
50 		mrioc->trace_buf_pool = dma_pool_create("trace_buf pool",
51 		    &mrioc->pdev->dev, MPI3MR_PAGE_SIZE_4K, MPI3MR_PAGE_SIZE_4K,
52 		    0);
53 		if (!mrioc->trace_buf_pool) {
54 			ioc_err(mrioc, "trace buf pool: dma_pool_create failed\n");
55 			goto trace_buf_pool_failed;
56 		}
57 
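		/*
		 * Allocate each 4K segment from the DMA pool and record its
		 * bus address in the list posted to the controller. trace_buf
		 * was zeroed above, so a zero list entry indicates a failed
		 * pool allocation.
		 */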
58 		for (i = 0; i < seg_count; i++) {
59 			mrioc->trace_buf[i].segment =
60 			    dma_pool_zalloc(mrioc->trace_buf_pool, GFP_KERNEL,
61 			    &mrioc->trace_buf[i].segment_dma);
62 			diag_buffer_list[i] =
63 			    (u64) mrioc->trace_buf[i].segment_dma;
64 			if (!diag_buffer_list[i])
65 				goto tb_seg_alloc_failed;
66 		}
67 
68 		diag_buffer->addr = diag_buffer_list;
69 		diag_buffer->dma_addr = diag_buffer_list_dma;
70 		diag_buffer->is_segmented = true;
71 
72 		dprint_init(mrioc, "segmented trace diag buffer\n"
73 				"is allocated successfully seg_count:%d\n", seg_count);
74 		return 0;
75 	} else {
76 		diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
77 		    trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
78 		if (diag_buffer->addr) {
79 			dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
80 			return 0;
81 		}
82 		return -1;
83 	}
84 
85 tb_seg_alloc_failed:
86 	if (mrioc->trace_buf_pool) {
87 		for (i = 0; i < mrioc->num_tb_segs; i++) {
88 			if (mrioc->trace_buf[i].segment) {
89 				dma_pool_free(mrioc->trace_buf_pool,
90 				    mrioc->trace_buf[i].segment,
91 				    mrioc->trace_buf[i].segment_dma);
92 			}
93 			/* clear the pointer even if the segment was never allocated */
94 			mrioc->trace_buf[i].segment = NULL;
95 		}
96 		dma_pool_destroy(mrioc->trace_buf_pool);
97 		mrioc->trace_buf_pool = NULL;
98 	}
99 trace_buf_pool_failed:
100 	kfree(mrioc->trace_buf);
101 	mrioc->trace_buf = NULL;
102 trace_buf_failed:
103 	if (diag_buffer_list)
104 		dma_free_coherent(&mrioc->pdev->dev,
105 		    sizeof(u64) * mrioc->num_tb_segs,
106 		    diag_buffer_list, diag_buffer_list_dma);
107 	return -1;
108 }
109 
110 /**
111  * mpi3mr_alloc_diag_bufs - Allocate memory for diag buffers
112  * @mrioc: Adapter instance reference
113  *
114  * This function checks whether the driver defined buffer sizes
115  * are greater than the controller local buffer sizes provided
116  * by IOCFacts and, if the driver defined sizes are larger, the
117  * driver allocates the specific buffer by reading driver page 1.
118  *
119  * Return: Nothing.
120  */
121 void mpi3mr_alloc_diag_bufs(struct mpi3mr_ioc *mrioc)
122 {
123 	struct diag_buffer_desc *diag_buffer;
124 	struct mpi3_driver_page1 driver_pg1;
125 	u32 trace_dec_size, trace_min_size, fw_dec_size, fw_min_size,
126 		trace_size, fw_size;
127 	u16 pg_sz = sizeof(driver_pg1);
128 	int retval = 0;
129 	bool retry = false;
130 
131 	if (mrioc->diag_buffers[0].addr || mrioc->diag_buffers[1].addr)
132 		return;
133 
134 	retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
135 	if (retval) {
136 		ioc_warn(mrioc,
137 		    "%s: driver page 1 read failed, allocating trace\n"
138 		    "and firmware diag buffers of default size\n", __func__);
139 		trace_size = fw_size = MPI3MR_DEFAULT_HDB_MAX_SZ;
140 		trace_dec_size = fw_dec_size = MPI3MR_DEFAULT_HDB_DEC_SZ;
141 		trace_min_size = fw_min_size = MPI3MR_DEFAULT_HDB_MIN_SZ;
142 
143 	} else {
144 		trace_size = driver_pg1.host_diag_trace_max_size * 1024;
145 		trace_dec_size = driver_pg1.host_diag_trace_decrement_size
146 			 * 1024;
147 		trace_min_size = driver_pg1.host_diag_trace_min_size * 1024;
148 		fw_size = driver_pg1.host_diag_fw_max_size * 1024;
149 		fw_dec_size = driver_pg1.host_diag_fw_decrement_size * 1024;
150 		fw_min_size = driver_pg1.host_diag_fw_min_size * 1024;
151 		dprint_init(mrioc,
152 		    "%s:trace diag buffer sizes read from driver\n"
153 		    "page1: maximum size = %dKB, decrement size = %dKB\n"
154 		    ", minimum size = %dKB\n", __func__, driver_pg1.host_diag_trace_max_size,
155 		    driver_pg1.host_diag_trace_decrement_size,
156 		    driver_pg1.host_diag_trace_min_size);
157 		dprint_init(mrioc,
158 		    "%s:firmware diag buffer sizes read from driver\n"
159 		    "page1: maximum size = %dKB, decrement size = %dKB\n"
160 		    ", minimum size = %dKB\n", __func__, driver_pg1.host_diag_fw_max_size,
161 		    driver_pg1.host_diag_fw_decrement_size,
162 		    driver_pg1.host_diag_fw_min_size);
163 		if ((trace_size == 0) && (fw_size == 0))
164 			return;
165 	}
166 
167 
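	/*
	 * Try to allocate the trace buffer at the requested size; on each
	 * failure shrink the request by the decrement size and retry until
	 * the size falls below the minimum.
	 */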
168 retry_trace:
169 	diag_buffer = &mrioc->diag_buffers[0];
170 	diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_TRACE;
171 	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED;
172 	if ((mrioc->facts.diag_trace_sz < trace_size) && (trace_size >=
173 		trace_min_size)) {
174 		if (!retry)
175 			dprint_init(mrioc,
176 			    "trying to allocate trace diag buffer of size = %dKB\n",
177 			    trace_size / 1024);
178 		if ((!mrioc->seg_tb_support && (get_order(trace_size) > MAX_PAGE_ORDER)) ||
179 		    mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
180 
181 			retry = true;
182 			trace_size -= trace_dec_size;
183 			dprint_init(mrioc, "trace diag buffer allocation failed\n"
184 			"retrying smaller size %dKB\n", trace_size / 1024);
185 			goto retry_trace;
186 		} else
187 			diag_buffer->size = trace_size;
188 	}
189 
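	/*
	 * The firmware diag buffer follows the same shrink-and-retry
	 * scheme, but is always a single contiguous DMA allocation.
	 */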
190 	retry = false;
191 retry_fw:
192 
193 	diag_buffer = &mrioc->diag_buffers[1];
194 
195 	diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_FW;
196 	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED;
197 	if ((mrioc->facts.diag_fw_sz < fw_size) && (fw_size >= fw_min_size)) {
198 		if (get_order(fw_size) <= MAX_PAGE_ORDER) {
199 			diag_buffer->addr
200 				= dma_alloc_coherent(&mrioc->pdev->dev, fw_size,
201 						     &diag_buffer->dma_addr,
202 						     GFP_KERNEL);
203 		}
204 		if (!retry)
205 			dprint_init(mrioc,
206 			    "%s:trying to allocate firmware diag buffer of size = %dKB\n",
207 			    __func__, fw_size / 1024);
208 		if (diag_buffer->addr) {
209 			dprint_init(mrioc, "%s:firmware diag buffer allocated successfully\n",
210 			    __func__);
211 			diag_buffer->size = fw_size;
212 		} else {
213 			retry = true;
214 			fw_size -= fw_dec_size;
215 			dprint_init(mrioc, "%s:firmware diag buffer allocation failed,\n"
216 					"retrying smaller size %dKB\n",
217 					__func__, fw_size / 1024);
218 			goto retry_fw;
219 		}
220 	}
221 }
222 
223 /**
224  * mpi3mr_issue_diag_buf_post - Send diag buffer post req
225  * @mrioc: Adapter instance reference
226  * @diag_buffer: Diagnostic buffer descriptor
227  *
228  * Issue a diagnostic buffer post MPI request through the admin
229  * queue and wait for its completion or a time out.
230  *
231  * Return: 0 on success, non-zero on failures.
232  */
233 int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
234 	struct diag_buffer_desc *diag_buffer)
235 {
236 	struct mpi3_diag_buffer_post_request diag_buf_post_req;
237 	u8 prev_status;
238 	int retval = 0;
239 
240 	if (diag_buffer->disabled_after_reset) {
241 		dprint_bsg_err(mrioc, "%s: skipping diag buffer posting\n"
242 				"as it is disabled after reset\n", __func__);
243 		return -1;
244 	}
245 
246 	memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req));
247 	mutex_lock(&mrioc->init_cmds.mutex);
248 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
249 		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
250 		mutex_unlock(&mrioc->init_cmds.mutex);
251 		return -1;
252 	}
253 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
254 	mrioc->init_cmds.is_waiting = 1;
255 	mrioc->init_cmds.callback = NULL;
256 	diag_buf_post_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
257 	diag_buf_post_req.function = MPI3_FUNCTION_DIAG_BUFFER_POST;
258 	diag_buf_post_req.type = diag_buffer->type;
259 	diag_buf_post_req.address = cpu_to_le64(diag_buffer->dma_addr);
260 	diag_buf_post_req.length = cpu_to_le32(diag_buffer->size);
261 
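	/*
	 * For a segmented buffer the address field points to the list of
	 * segment addresses; the segmented message flag tells the
	 * controller to treat it as such.
	 */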
262 	if (diag_buffer->is_segmented)
263 		diag_buf_post_req.msg_flags |= MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED;
264 
265 	dprint_bsg_info(mrioc, "%s: posting diag buffer type %d segmented:%d\n", __func__,
266 	    diag_buffer->type, diag_buffer->is_segmented);
267 
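	/*
	 * Remember the previous buffer status so it can be restored if
	 * the post request fails.
	 */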
268 	prev_status = diag_buffer->status;
269 	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
270 	init_completion(&mrioc->init_cmds.done);
271 	retval = mpi3mr_admin_request_post(mrioc, &diag_buf_post_req,
272 	    sizeof(diag_buf_post_req), 1);
273 	if (retval) {
274 		dprint_bsg_err(mrioc, "%s: admin request post failed\n",
275 		    __func__);
276 		goto out_unlock;
277 	}
278 	wait_for_completion_timeout(&mrioc->init_cmds.done,
279 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
280 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
281 		mrioc->init_cmds.is_waiting = 0;
282 		dprint_bsg_err(mrioc, "%s: command timed out\n", __func__);
283 		mpi3mr_check_rh_fault_ioc(mrioc,
284 		    MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT);
285 		retval = -1;
286 		goto out_unlock;
287 	}
288 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
289 	    != MPI3_IOCSTATUS_SUCCESS) {
290 		dprint_bsg_err(mrioc,
291 		    "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n",
292 		    __func__, diag_buffer->type,
293 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
294 		    mrioc->init_cmds.ioc_loginfo);
295 		retval = -1;
296 		goto out_unlock;
297 	}
298 	dprint_bsg_info(mrioc, "%s: diag buffer type %d posted successfully\n",
299 	    __func__, diag_buffer->type);
300 
301 out_unlock:
302 	if (retval)
303 		diag_buffer->status = prev_status;
304 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
305 	mutex_unlock(&mrioc->init_cmds.mutex);
306 	return retval;
307 }
308 
309 /**
310  * mpi3mr_post_diag_bufs - Post diag buffers to the controller
311  * @mrioc: Adapter instance reference
312  *
313  * This function calls a helper function to post both the trace
314  * and firmware buffers to the controller.
315  *
316  * Return: 0 on success, non-zero on failures.
317  */
318 int mpi3mr_post_diag_bufs(struct mpi3mr_ioc *mrioc)
319 {
320 	u8 i;
321 	struct diag_buffer_desc *diag_buffer;
322 
323 	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
324 		diag_buffer = &mrioc->diag_buffers[i];
325 		if (!(diag_buffer->addr))
326 			continue;
327 		if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer))
328 			return -1;
329 	}
330 	return 0;
331 }
332 
333 /**
334  * mpi3mr_issue_diag_buf_release - Send diag buffer release req
335  * @mrioc: Adapter instance reference
336  * @diag_buffer: Diagnostic buffer descriptor
337  *
338  * Issue a diagnostic buffer manage MPI request with the release
339  * action through the admin queue and wait for its completion
340  * or a time out.
341  *
342  * Return: 0 on success, non-zero on failures.
343  */
344 int mpi3mr_issue_diag_buf_release(struct mpi3mr_ioc *mrioc,
345 	struct diag_buffer_desc *diag_buffer)
346 {
347 	struct mpi3_diag_buffer_manage_request diag_buf_manage_req;
348 	int retval = 0;
349 
350 	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
351 	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
352 		return retval;
353 
354 	memset(&diag_buf_manage_req, 0, sizeof(diag_buf_manage_req));
355 	mutex_lock(&mrioc->init_cmds.mutex);
356 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
357 		dprint_reset(mrioc, "%s: command is in use\n", __func__);
358 		mutex_unlock(&mrioc->init_cmds.mutex);
359 		return -1;
360 	}
361 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
362 	mrioc->init_cmds.is_waiting = 1;
363 	mrioc->init_cmds.callback = NULL;
364 	diag_buf_manage_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
365 	diag_buf_manage_req.function = MPI3_FUNCTION_DIAG_BUFFER_MANAGE;
366 	diag_buf_manage_req.type = diag_buffer->type;
367 	diag_buf_manage_req.action = MPI3_DIAG_BUFFER_ACTION_RELEASE;
368 
369 
370 	dprint_reset(mrioc, "%s: releasing diag buffer type %d\n", __func__,
371 	    diag_buffer->type);
372 	init_completion(&mrioc->init_cmds.done);
373 	retval = mpi3mr_admin_request_post(mrioc, &diag_buf_manage_req,
374 	    sizeof(diag_buf_manage_req), 1);
375 	if (retval) {
376 		dprint_reset(mrioc, "%s: admin request post failed\n", __func__);
377 		mpi3mr_set_trigger_data_in_hdb(diag_buffer,
378 		    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
379 		goto out_unlock;
380 	}
381 	wait_for_completion_timeout(&mrioc->init_cmds.done,
382 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
383 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
384 		mrioc->init_cmds.is_waiting = 0;
385 		dprint_reset(mrioc, "%s: command timed out\n", __func__);
386 		mpi3mr_check_rh_fault_ioc(mrioc,
387 		    MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT);
388 		retval = -1;
389 		goto out_unlock;
390 	}
391 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
392 	    != MPI3_IOCSTATUS_SUCCESS) {
393 		dprint_reset(mrioc,
394 		    "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n",
395 		    __func__, diag_buffer->type,
396 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
397 		    mrioc->init_cmds.ioc_loginfo);
398 		retval = -1;
399 		goto out_unlock;
400 	}
401 	dprint_reset(mrioc, "%s: diag buffer type %d released successfully\n",
402 	    __func__, diag_buffer->type);
403 
404 out_unlock:
405 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
406 	mutex_unlock(&mrioc->init_cmds.mutex);
407 	return retval;
408 }
409 
410 /**
411  * mpi3mr_process_trigger - Generic HDB Trigger handler
412  * @mrioc: Adapter instance reference
413  * @trigger_type: Trigger type
414  * @trigger_data: Trigger data
415  * @trigger_flags: Trigger flags
416  *
417  * This function checks the validity of the HDBs and triggers,
418  * and based on the trigger information creates an event to be
419  * processed in the firmware event worker thread.
420  *
421  * This function should be called with the trigger spinlock held.
422  *
423  * Return: Nothing
424  */
425 static void mpi3mr_process_trigger(struct mpi3mr_ioc *mrioc, u8 trigger_type,
426 	union mpi3mr_trigger_data *trigger_data, u8 trigger_flags)
427 {
428 	struct trigger_event_data event_data;
429 	struct diag_buffer_desc *trace_hdb = NULL;
430 	struct diag_buffer_desc *fw_hdb = NULL;
431 	u64 global_trigger;
432 
433 	trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
434 	    MPI3_DIAG_BUFFER_TYPE_TRACE);
435 	if (trace_hdb &&
436 	    (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
437 	    (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
438 		trace_hdb = NULL;
439 
440 	fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);
441 
442 	if (fw_hdb &&
443 	    (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
444 	    (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
445 		fw_hdb = NULL;
446 
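	/*
	 * Bail out if a snapdump trigger is already active, if both
	 * release triggers have already fired, if no HDB is in a posted
	 * state, if driver page 2 is not cached, or if an element
	 * trigger arrives with no triggers configured.
	 */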
447 	if (mrioc->snapdump_trigger_active || (mrioc->fw_release_trigger_active
448 	    && mrioc->trace_release_trigger_active) ||
449 	    (!trace_hdb && !fw_hdb) || (!mrioc->driver_pg2) ||
450 	    ((trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
451 	     && (!mrioc->driver_pg2->num_triggers)))
452 		return;
453 
454 	memset(&event_data, 0, sizeof(event_data));
455 	event_data.trigger_type = trigger_type;
456 	memcpy(&event_data.trigger_specific_data, trigger_data,
457 	    sizeof(*trigger_data));
458 	global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);
459 
460 	if (global_trigger & MPI3_DRIVER2_GLOBALTRIGGER_SNAPDUMP_ENABLED) {
461 		event_data.snapdump = true;
462 		event_data.trace_hdb = trace_hdb;
463 		event_data.fw_hdb = fw_hdb;
464 		mrioc->snapdump_trigger_active = true;
465 	} else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_GLOBAL) {
466 		if ((trace_hdb) && (global_trigger &
467 		    MPI3_DRIVER2_GLOBALTRIGGER_DIAG_TRACE_RELEASE) &&
468 		    (!mrioc->trace_release_trigger_active)) {
469 			event_data.trace_hdb = trace_hdb;
470 			mrioc->trace_release_trigger_active = true;
471 		}
472 		if ((fw_hdb) && (global_trigger &
473 		    MPI3_DRIVER2_GLOBALTRIGGER_DIAG_FW_RELEASE) &&
474 		    (!mrioc->fw_release_trigger_active)) {
475 			event_data.fw_hdb = fw_hdb;
476 			mrioc->fw_release_trigger_active = true;
477 		}
478 	} else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) {
479 		if ((trace_hdb) && (trigger_flags &
480 		    MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE) &&
481 		    (!mrioc->trace_release_trigger_active)) {
482 			event_data.trace_hdb = trace_hdb;
483 			mrioc->trace_release_trigger_active = true;
484 		}
485 		if ((fw_hdb) && (trigger_flags &
486 		    MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE) &&
487 		    (!mrioc->fw_release_trigger_active)) {
488 			event_data.fw_hdb = fw_hdb;
489 			mrioc->fw_release_trigger_active = true;
490 		}
491 	}
492 
493 	if (event_data.trace_hdb || event_data.fw_hdb)
494 		mpi3mr_hdb_trigger_data_event(mrioc, &event_data);
495 }
496 
497 /**
498  * mpi3mr_global_trigger - Global HDB trigger handler
499  * @mrioc: Adapter instance reference
500  * @trigger_data: Trigger data
501  *
502  * This function checks whether the given global trigger is
503  * enabled in driver page 2 and, if so, calls the generic trigger
504  * handler to queue an event for HDB release.
505  *
506  * Return: Nothing
507  */
508 void mpi3mr_global_trigger(struct mpi3mr_ioc *mrioc, u64 trigger_data)
509 {
510 	unsigned long flags;
511 	union mpi3mr_trigger_data trigger_specific_data;
512 
513 	spin_lock_irqsave(&mrioc->trigger_lock, flags);
514 	if (le64_to_cpu(mrioc->driver_pg2->global_trigger) & trigger_data) {
515 		memset(&trigger_specific_data, 0,
516 		    sizeof(trigger_specific_data));
517 		trigger_specific_data.global = trigger_data;
518 		mpi3mr_process_trigger(mrioc, MPI3MR_HDB_TRIGGER_TYPE_GLOBAL,
519 		    &trigger_specific_data, 0);
520 	}
521 	spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
522 }
523 
524 /**
525  * mpi3mr_scsisense_trigger - SCSI sense HDB trigger handler
526  * @mrioc: Adapter instance reference
527  * @sensekey: Sense Key
528  * @asc: Additional Sense Code
529  * @ascq: Additional Sense Code Qualifier
530  *
531  * This function compares SCSI sense trigger values with driver
532  * page 2 values and calls the generic trigger handler to release
533  * HDBs if a match is found.
534  *
535  * Return: Nothing
536  */
537 void mpi3mr_scsisense_trigger(struct mpi3mr_ioc *mrioc, u8 sensekey, u8 asc,
538 	u8 ascq)
539 {
540 	struct mpi3_driver2_trigger_scsi_sense *scsi_sense_trigger = NULL;
541 	u64 i = 0;
542 	unsigned long flags;
543 	u8 num_triggers, trigger_flags;
544 
545 	if (mrioc->scsisense_trigger_present) {
546 		spin_lock_irqsave(&mrioc->trigger_lock, flags);
547 		scsi_sense_trigger = (struct mpi3_driver2_trigger_scsi_sense *)
548 			mrioc->driver_pg2->trigger;
549 		num_triggers = mrioc->driver_pg2->num_triggers;
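		/*
		 * Walk the trigger elements; MATCH_ALL values act as
		 * wildcards for the sense key, ASC and ASCQ comparisons.
		 */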
550 		for (i = 0; i < num_triggers; i++, scsi_sense_trigger++) {
551 			if (scsi_sense_trigger->type !=
552 			    MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE)
553 				continue;
554 			if (!(scsi_sense_trigger->sense_key ==
555 			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL
556 			      || scsi_sense_trigger->sense_key == sensekey))
557 				continue;
558 			if (!(scsi_sense_trigger->asc ==
559 			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL ||
560 			    scsi_sense_trigger->asc == asc))
561 				continue;
562 			if (!(scsi_sense_trigger->ascq ==
563 			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL ||
564 			    scsi_sense_trigger->ascq == ascq))
565 				continue;
566 			trigger_flags = scsi_sense_trigger->flags;
567 			mpi3mr_process_trigger(mrioc,
568 			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
569 			    (union mpi3mr_trigger_data *)scsi_sense_trigger,
570 			    trigger_flags);
571 			break;
572 		}
573 		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
574 	}
575 }
576 
577 /**
578  * mpi3mr_event_trigger - MPI event HDB trigger handler
579  * @mrioc: Adapter instance reference
580  * @event: MPI Event
581  *
582  * This function compares event trigger values with driver page
583  * 2 values and calls the generic trigger handler to release
584  * HDBs if a match is found.
585  *
586  * Return: Nothing
587  */
588 void mpi3mr_event_trigger(struct mpi3mr_ioc *mrioc, u8 event)
589 {
590 	struct mpi3_driver2_trigger_event *event_trigger = NULL;
591 	u64 i = 0;
592 	unsigned long flags;
593 	u8 num_triggers, trigger_flags;
594 
595 	if (mrioc->event_trigger_present) {
596 		spin_lock_irqsave(&mrioc->trigger_lock, flags);
597 		event_trigger = (struct mpi3_driver2_trigger_event *)
598 			mrioc->driver_pg2->trigger;
599 		num_triggers = mrioc->driver_pg2->num_triggers;
600 
601 		for (i = 0; i < num_triggers; i++, event_trigger++) {
602 			if (event_trigger->type !=
603 			    MPI3_DRIVER2_TRIGGER_TYPE_EVENT)
604 				continue;
605 			if (event_trigger->event != event)
606 				continue;
607 			trigger_flags = event_trigger->flags;
608 			mpi3mr_process_trigger(mrioc,
609 			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
610 			    (union mpi3mr_trigger_data *)event_trigger,
611 			    trigger_flags);
612 			break;
613 		}
614 		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
615 	}
616 }
617 
618 /**
619  * mpi3mr_reply_trigger - MPI Reply HDB trigger handler
620  * @mrioc: Adapter instance reference
621  * @ioc_status: Masked value of IOC Status from MPI Reply
622  * @ioc_loginfo: IOC Log Info from MPI Reply
623  *
624  * This function compares IOC status and IOC log info trigger
625  * values with driver page 2 values and calls the generic trigger
626  * handler to release HDBs if a match is found.
627  *
628  * Return: Nothing
629  */
630 void mpi3mr_reply_trigger(struct mpi3mr_ioc *mrioc, u16 ioc_status,
631 	u32 ioc_loginfo)
632 {
633 	struct mpi3_driver2_trigger_reply *reply_trigger = NULL;
634 	u64 i = 0;
635 	unsigned long flags;
636 	u8 num_triggers, trigger_flags;
637 
638 	if (mrioc->reply_trigger_present) {
639 		spin_lock_irqsave(&mrioc->trigger_lock, flags);
640 		reply_trigger = (struct mpi3_driver2_trigger_reply *)
641 			mrioc->driver_pg2->trigger;
642 		num_triggers = mrioc->driver_pg2->num_triggers;
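		/*
		 * A reply trigger matches when the IOC status equals the
		 * trigger value (or MATCH_ALL) and the masked IOC log info
		 * equals the trigger's log info value.
		 */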
643 		for (i = 0; i < num_triggers; i++, reply_trigger++) {
644 			if (reply_trigger->type !=
645 			    MPI3_DRIVER2_TRIGGER_TYPE_REPLY)
646 				continue;
647 			if ((le16_to_cpu(reply_trigger->ioc_status) !=
648 			     ioc_status)
649 			    && (le16_to_cpu(reply_trigger->ioc_status) !=
650 			    MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL))
651 				continue;
652 			if ((le32_to_cpu(reply_trigger->ioc_log_info) !=
653 			    (le32_to_cpu(reply_trigger->ioc_log_info_mask) &
654 			     ioc_loginfo)))
655 				continue;
656 			trigger_flags = reply_trigger->flags;
657 			mpi3mr_process_trigger(mrioc,
658 			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
659 			    (union mpi3mr_trigger_data *)reply_trigger,
660 			    trigger_flags);
661 			break;
662 		}
663 		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
664 	}
665 }
666 
667 /**
668  * mpi3mr_get_num_trigger - Gets number of HDB triggers
669  * @mrioc: Adapter instance reference
670  * @num_triggers: Number of triggers
671  * @page_action: Page action
672  *
673  * This function reads the number of triggers by reading driver
674  * page 2.
675  *
676  * Return: 0 on success and proper error codes on failure
677  */
678 static int mpi3mr_get_num_trigger(struct mpi3mr_ioc *mrioc, u8 *num_triggers,
679 	u8 page_action)
680 {
681 	struct mpi3_driver_page2 drvr_page2;
682 	int retval = 0;
683 
684 	*num_triggers = 0;
685 
686 	retval = mpi3mr_cfg_get_driver_pg2(mrioc, &drvr_page2,
687 	    sizeof(struct mpi3_driver_page2), page_action);
688 
689 	if (retval) {
690 		dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__);
691 		return retval;
692 	}
693 	*num_triggers = drvr_page2.num_triggers;
694 	return retval;
695 }
696 
697 /**
698  * mpi3mr_refresh_trigger - Handler for Refresh trigger BSG
699  * @mrioc: Adapter instance reference
700  * @page_action: Page action
701  *
702  * This function caches driver page 2 in the driver's memory by
703  * reading it from the controller for the given page action and
704  * updates the HDB trigger values.
705  *
706  * Return: 0 on success and proper error codes on failure
707  */
708 int mpi3mr_refresh_trigger(struct mpi3mr_ioc *mrioc, u8 page_action)
709 {
710 	u16 pg_sz = sizeof(struct mpi3_driver_page2);
711 	struct mpi3_driver_page2 *drvr_page2 = NULL;
712 	u8 trigger_type, num_triggers;
713 	int retval;
714 	int i = 0;
715 	unsigned long flags;
716 
717 	retval = mpi3mr_get_num_trigger(mrioc, &num_triggers, page_action);
718 
719 	if (retval)
720 		goto out;
721 
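	/*
	 * Size the page buffer to hold the page header plus every
	 * trigger element reported by the controller.
	 */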
722 	pg_sz = offsetof(struct mpi3_driver_page2, trigger) +
723 		(num_triggers * sizeof(union mpi3_driver2_trigger_element));
724 	drvr_page2 = kzalloc(pg_sz, GFP_KERNEL);
725 	if (!drvr_page2) {
726 		retval = -ENOMEM;
727 		goto out;
728 	}
729 
730 	retval = mpi3mr_cfg_get_driver_pg2(mrioc, drvr_page2, pg_sz, page_action);
731 	if (retval) {
732 		dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__);
733 		kfree(drvr_page2);
734 		goto out;
735 	}
736 	spin_lock_irqsave(&mrioc->trigger_lock, flags);
737 	kfree(mrioc->driver_pg2);
738 	mrioc->driver_pg2 = drvr_page2;
739 	mrioc->reply_trigger_present = false;
740 	mrioc->event_trigger_present = false;
741 	mrioc->scsisense_trigger_present = false;
742 
743 	for (i = 0; (i < mrioc->driver_pg2->num_triggers); i++) {
744 		trigger_type = mrioc->driver_pg2->trigger[i].event.type;
745 		switch (trigger_type) {
746 		case MPI3_DRIVER2_TRIGGER_TYPE_REPLY:
747 			mrioc->reply_trigger_present = true;
748 			break;
749 		case MPI3_DRIVER2_TRIGGER_TYPE_EVENT:
750 			mrioc->event_trigger_present = true;
751 			break;
752 		case MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE:
753 			mrioc->scsisense_trigger_present = true;
754 			break;
755 		default:
756 			break;
757 		}
758 	}
759 	spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
760 out:
761 	return retval;
762 }
763 
764 /**
765  * mpi3mr_release_diag_bufs - Release diag buffers
766  * @mrioc: Adapter instance reference
767  * @skip_rel_action: Skip release action and set buffer state
768  *
769  * This function calls helper function to release both trace and
770  * firmware buffers from the controller.
771  *
772  * Return: None
773  */
774 void mpi3mr_release_diag_bufs(struct mpi3mr_ioc *mrioc, u8 skip_rel_action)
775 {
776 	u8 i;
777 	struct diag_buffer_desc *diag_buffer;
778 
779 	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
780 		diag_buffer = &mrioc->diag_buffers[i];
781 		if (!(diag_buffer->addr))
782 			continue;
783 		if (diag_buffer->status == MPI3MR_HDB_BUFSTATUS_RELEASED)
784 			continue;
785 		if (!skip_rel_action)
786 			mpi3mr_issue_diag_buf_release(mrioc, diag_buffer);
787 		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED;
788 		atomic64_inc(&event_counter);
789 	}
790 }
791 
792 /**
793  * mpi3mr_set_trigger_data_in_hdb - Updates HDB trigger type and
794  * trigger data
795  *
796  * @hdb: HDB pointer
797  * @type: Trigger type
798  * @trigger_data: Pointer to trigger data information
799  * @force: Trigger overwrite flag, when set the previously
800  *  recorded trigger information is overwritten
801  *
802  * Updates the trigger type and trigger data based on the
803  * parameters passed to this function.
804  *
805  * Return: Nothing
806  */
807 void mpi3mr_set_trigger_data_in_hdb(struct diag_buffer_desc *hdb,
808 	u8 type, union mpi3mr_trigger_data *trigger_data, bool force)
809 {
810 	if ((!force) && (hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN))
811 		return;
812 	hdb->trigger_type = type;
813 	if (!trigger_data)
814 		memset(&hdb->trigger_data, 0, sizeof(*trigger_data));
815 	else
816 		memcpy(&hdb->trigger_data, trigger_data, sizeof(*trigger_data));
817 }
818 
819 /**
820  * mpi3mr_set_trigger_data_in_all_hdb - Updates HDB trigger type
821  * and trigger data for all HDB
822  *
823  * @mrioc: Adapter instance reference
824  * @type: Trigger type
825  * @trigger_data: Pointer to trigger data information
826  * @force: Trigger overwrite flag, when set the previously
827  *  recorded trigger information is overwritten
828  *
829  * Updates the trigger type and trigger data of all HDBs based
830  * on the parameters passed to this function.
831  *
832  * Return: Nothing
833  */
834 void mpi3mr_set_trigger_data_in_all_hdb(struct mpi3mr_ioc *mrioc,
835 	u8 type, union mpi3mr_trigger_data *trigger_data, bool force)
836 {
837 	struct diag_buffer_desc *hdb = NULL;
838 
839 	hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_TRACE);
840 	if (hdb)
841 		mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force);
842 	hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);
843 	if (hdb)
844 		mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force);
845 }
846 
847 /**
848  * mpi3mr_hdbstatuschg_evt_th - HDB status change evt tophalf
849  * @mrioc: Adapter instance reference
850  * @event_reply: event data
851  *
852  * Modifies the status of the applicable diag buffer descriptors
853  *
854  * Return: Nothing
855  */
856 void mpi3mr_hdbstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
857 	struct mpi3_event_notification_reply *event_reply)
858 {
859 	struct mpi3_event_data_diag_buffer_status_change *evtdata;
860 	struct diag_buffer_desc *diag_buffer;
861 
862 	evtdata = (struct mpi3_event_data_diag_buffer_status_change *)
863 	    event_reply->event_data;
864 
865 	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, evtdata->type);
866 	if (!diag_buffer)
867 		return;
868 	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
869 	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
870 		return;
871 	switch (evtdata->reason_code) {
872 	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED:
873 	{
874 		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED;
875 		mpi3mr_set_trigger_data_in_hdb(diag_buffer,
876 		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
877 		atomic64_inc(&event_counter);
878 		break;
879 	}
880 	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED:
881 	{
882 		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
883 		break;
884 	}
885 	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED:
886 	{
887 		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED;
888 		break;
889 	}
890 	default:
891 		dprint_event_th(mrioc, "%s: unknown reason_code(%d)\n",
892 		    __func__, evtdata->reason_code);
893 		break;
894 	}
895 }
896 
897 /**
898  * mpi3mr_diag_buffer_for_type - returns buffer desc for type
899  * @mrioc: Adapter instance reference
900  * @buf_type: Diagnostic buffer type
901  *
902  * Identifies the matching diag buffer descriptor from mrioc for
903  * the given diag buffer type.
904  *
905  * Return: diag buffer descriptor on success, NULL on failures.
906  */
907 
908 struct diag_buffer_desc *
909 mpi3mr_diag_buffer_for_type(struct mpi3mr_ioc *mrioc, u8 buf_type)
910 {
911 	u8 i;
912 
913 	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
914 		if (mrioc->diag_buffers[i].type == buf_type)
915 			return &mrioc->diag_buffers[i];
916 	}
917 	return NULL;
918 }
919 
920 /**
921  * mpi3mr_bsg_pel_abort - sends PEL abort request
922  * @mrioc: Adapter instance reference
923  *
924  * This function sends PEL abort request to the firmware through
925  * admin request queue.
926  *
927  * Return: 0 on success, -1 on failure
928  */
929 static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc)
930 {
931 	struct mpi3_pel_req_action_abort pel_abort_req;
932 	struct mpi3_pel_reply *pel_reply;
933 	int retval = 0;
934 	u16 pe_log_status;
935 
936 	if (mrioc->reset_in_progress) {
937 		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
938 		return -1;
939 	}
940 	if (mrioc->stop_bsgs || mrioc->block_on_pci_err) {
941 		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
942 		return -1;
943 	}
944 
945 	memset(&pel_abort_req, 0, sizeof(pel_abort_req));
946 	mutex_lock(&mrioc->pel_abort_cmd.mutex);
947 	if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) {
948 		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
949 		mutex_unlock(&mrioc->pel_abort_cmd.mutex);
950 		return -1;
951 	}
952 	mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING;
953 	mrioc->pel_abort_cmd.is_waiting = 1;
954 	mrioc->pel_abort_cmd.callback = NULL;
955 	pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT);
956 	pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
957 	pel_abort_req.action = MPI3_PEL_ACTION_ABORT;
958 	pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
959 
960 	mrioc->pel_abort_requested = 1;
961 	init_completion(&mrioc->pel_abort_cmd.done);
962 	retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req,
963 	    sizeof(pel_abort_req), 0);
964 	if (retval) {
965 		retval = -1;
966 		dprint_bsg_err(mrioc, "%s: admin request post failed\n",
967 		    __func__);
968 		mrioc->pel_abort_requested = 0;
969 		goto out_unlock;
970 	}
971 
972 	wait_for_completion_timeout(&mrioc->pel_abort_cmd.done,
973 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
974 	if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) {
975 		mrioc->pel_abort_cmd.is_waiting = 0;
976 		dprint_bsg_err(mrioc, "%s: command timed out\n", __func__);
977 		if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET))
978 			mpi3mr_soft_reset_handler(mrioc,
979 			    MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1);
980 		retval = -1;
981 		goto out_unlock;
982 	}
983 	if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
984 	     != MPI3_IOCSTATUS_SUCCESS) {
985 		dprint_bsg_err(mrioc,
986 		    "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
987 		    __func__, (mrioc->pel_abort_cmd.ioc_status &
988 		    MPI3_IOCSTATUS_STATUS_MASK),
989 		    mrioc->pel_abort_cmd.ioc_loginfo);
990 		retval = -1;
991 		goto out_unlock;
992 	}
993 	if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) {
994 		pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply;
995 		pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
996 		if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) {
997 			dprint_bsg_err(mrioc,
998 			    "%s: command failed, pel_status(0x%04x)\n",
999 			    __func__, pe_log_status);
1000 			retval = -1;
1001 		}
1002 	}
1003 
1004 out_unlock:
1005 	mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
1006 	mutex_unlock(&mrioc->pel_abort_cmd.mutex);
1007 	return retval;
1008 }
1009 /**
1010  * mpi3mr_bsg_verify_adapter - verify adapter number is valid
1011  * @ioc_number: Adapter number
1012  *
1013  * This function returns the adapter instance pointer for the
1014  * given adapter number. If the adapter number does not match
1015  * any adapter in the driver's list, NULL is returned.
1016  *
1017  * Return: adapter instance reference
1018  */
1019 static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number)
1020 {
1021 	struct mpi3mr_ioc *mrioc = NULL;
1022 
1023 	spin_lock(&mrioc_list_lock);
1024 	list_for_each_entry(mrioc, &mrioc_list, list) {
1025 		if (mrioc->id == ioc_number) {
1026 			spin_unlock(&mrioc_list_lock);
1027 			return mrioc;
1028 		}
1029 	}
1030 	spin_unlock(&mrioc_list_lock);
1031 	return NULL;
1032 }
1033 
1034 /**
1035  * mpi3mr_bsg_refresh_hdb_triggers - Refresh HDB trigger data
1036  * @mrioc: Adapter instance reference
1037  * @job: BSG Job pointer
1038  *
1039  * This function reads the controller trigger config page as
1040  * defined by the input page type and refreshes the driver's
1041  * local trigger information structures with the controller's
1042  * config page data.
1043  *
1044  * Return: 0 on success and proper error codes on failure
1045  */
1046 static long
1047 mpi3mr_bsg_refresh_hdb_triggers(struct mpi3mr_ioc *mrioc,
1048 				struct bsg_job *job)
1049 {
1050 	struct mpi3mr_bsg_out_refresh_hdb_triggers refresh_triggers;
1051 	uint32_t data_out_sz;
1052 	u8 page_action;
1053 	long rval = -EINVAL;
1054 
1055 	data_out_sz = job->request_payload.payload_len;
1056 
1057 	if (data_out_sz != sizeof(refresh_triggers)) {
1058 		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
1059 		    __func__);
1060 		return rval;
1061 	}
1062 
1063 	if (mrioc->unrecoverable) {
1064 		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
1065 		    __func__);
1066 		return -EFAULT;
1067 	}
1068 	if (mrioc->reset_in_progress) {
1069 		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
1070 		return -EAGAIN;
1071 	}
1072 
1073 	sg_copy_to_buffer(job->request_payload.sg_list,
1074 	    job->request_payload.sg_cnt,
1075 	    &refresh_triggers, sizeof(refresh_triggers));
1076 
1077 	switch (refresh_triggers.page_type) {
1078 	case MPI3MR_HDB_REFRESH_TYPE_CURRENT:
1079 		page_action = MPI3_CONFIG_ACTION_READ_CURRENT;
1080 		break;
1081 	case MPI3MR_HDB_REFRESH_TYPE_DEFAULT:
1082 		page_action = MPI3_CONFIG_ACTION_READ_DEFAULT;
1083 		break;
1084 	case MPI3MR_HDB_HDB_REFRESH_TYPE_PERSISTENT:
1085 		page_action = MPI3_CONFIG_ACTION_READ_PERSISTENT;
1086 		break;
1087 	default:
1088 		dprint_bsg_err(mrioc,
1089 		    "%s: unsupported refresh trigger, page_type %d\n",
1090 		    __func__, refresh_triggers.page_type);
1091 		return rval;
1092 	}
1093 	rval = mpi3mr_refresh_trigger(mrioc, page_action);
1094 
1095 	return rval;
1096 }
1097 
1098 /**
1099  * mpi3mr_bsg_upload_hdb - Upload a specific HDB to user space
1100  * @mrioc: Adapter instance reference
1101  * @job: BSG Job pointer
1102  *
1103  * Return: 0 on success and proper error codes on failure
1104  */
1105 static long mpi3mr_bsg_upload_hdb(struct mpi3mr_ioc *mrioc,
1106 				  struct bsg_job *job)
1107 {
1108 	struct mpi3mr_bsg_out_upload_hdb upload_hdb;
1109 	struct diag_buffer_desc *diag_buffer;
1110 	uint32_t data_out_size;
1111 	uint32_t data_in_size;
1112 
1113 	data_out_size = job->request_payload.payload_len;
1114 	data_in_size = job->reply_payload.payload_len;
1115 
1116 	if (data_out_size != sizeof(upload_hdb)) {
1117 		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
1118 		    __func__);
1119 		return -EINVAL;
1120 	}
1121 
1122 	sg_copy_to_buffer(job->request_payload.sg_list,
1123 			  job->request_payload.sg_cnt,
1124 			  &upload_hdb, sizeof(upload_hdb));
1125 
1126 	if ((!upload_hdb.length) || (data_in_size != upload_hdb.length)) {
1127 		dprint_bsg_err(mrioc, "%s: invalid length argument\n",
1128 		    __func__);
1129 		return -EINVAL;
1130 	}
1131 	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, upload_hdb.buf_type);
1132 	if ((!diag_buffer) || (!diag_buffer->addr)) {
1133 		dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n",
1134 		    __func__, upload_hdb.buf_type);
1135 		return -EINVAL;
1136 	}
1137 
1138 	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) &&
1139 	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) {
1140 		dprint_bsg_err(mrioc,
1141 		    "%s: invalid buffer status %d for type %d\n",
1142 		    __func__, diag_buffer->status, upload_hdb.buf_type);
1143 		return -EINVAL;
1144 	}
1145 
1146 	if ((upload_hdb.start_offset + upload_hdb.length) > diag_buffer->size) {
1147 		dprint_bsg_err(mrioc,
1148 		    "%s: invalid start offset %d, length %d for type %d\n",
1149 		    __func__, upload_hdb.start_offset, upload_hdb.length,
1150 		    upload_hdb.buf_type);
1151 		return -EINVAL;
1152 	}
1153 	sg_copy_from_buffer(job->reply_payload.sg_list,
1154 			    job->reply_payload.sg_cnt,
1155 	    (diag_buffer->addr + upload_hdb.start_offset),
1156 	    data_in_size);
1157 	return 0;
1158 }
1159 
1160 /**
1161  * mpi3mr_bsg_repost_hdb - Re-post HDB
1162  * @mrioc: Adapter instance reference
1163  * @job: BSG job pointer
1164  *
1165  * This function retrieves the HDB descriptor corresponding to a
1166  * given buffer type and if the HDB is in released status then
1167  * posts the HDB with the firmware.
1168  *
1169  * Return: 0 on success and proper error codes on failure
1170  */
1171 static long mpi3mr_bsg_repost_hdb(struct mpi3mr_ioc *mrioc,
1172 				  struct bsg_job *job)
1173 {
1174 	struct mpi3mr_bsg_out_repost_hdb repost_hdb;
1175 	struct diag_buffer_desc *diag_buffer;
1176 	uint32_t data_out_sz;
1177 
1178 	data_out_sz = job->request_payload.payload_len;
1179 
1180 	if (data_out_sz != sizeof(repost_hdb)) {
1181 		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
1182 		    __func__);
1183 		return -EINVAL;
1184 	}
1185 	if (mrioc->unrecoverable) {
1186 		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
1187 		    __func__);
1188 		return -EFAULT;
1189 	}
1190 	if (mrioc->reset_in_progress) {
1191 		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
1192 		return -EAGAIN;
1193 	}
1194 
1195 	sg_copy_to_buffer(job->request_payload.sg_list,
1196 			  job->request_payload.sg_cnt,
1197 			  &repost_hdb, sizeof(repost_hdb));
1198 
1199 	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, repost_hdb.buf_type);
1200 	if ((!diag_buffer) || (!diag_buffer->addr)) {
1201 		dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n",
1202 		    __func__, repost_hdb.buf_type);
1203 		return -EINVAL;
1204 	}
1205 
1206 	if (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) {
1207 		dprint_bsg_err(mrioc,
1208 		    "%s: invalid buffer status %d for type %d\n",
1209 		    __func__, diag_buffer->status, repost_hdb.buf_type);
1210 		return -EINVAL;
1211 	}
1212 
1213 	if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer)) {
1214 		dprint_bsg_err(mrioc, "%s: post failed for type %d\n",
1215 		    __func__, repost_hdb.buf_type);
1216 		return -EFAULT;
1217 	}
1218 	mpi3mr_set_trigger_data_in_hdb(diag_buffer,
1219 	    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
1220 
1221 	return 0;
1222 }
1223 
1224 /**
1225  * mpi3mr_bsg_query_hdb - Handler for query HDB command
1226  * @mrioc: Adapter instance reference
1227  * @job: BSG job pointer
1228  *
1229  * This function prepares and copies the host diagnostic buffer
1230  * entries to the user buffer.
1231  *
1232  * Return: 0 on success and proper error codes on failure
1233  */
1234 static long mpi3mr_bsg_query_hdb(struct mpi3mr_ioc *mrioc,
1235 				 struct bsg_job *job)
1236 {
1237 	long rval = 0;
1238 	struct mpi3mr_bsg_in_hdb_status *hbd_status;
1239 	struct mpi3mr_hdb_entry *hbd_status_entry;
1240 	u32 length, min_length;
1241 	u8 i;
1242 	struct diag_buffer_desc *diag_buffer;
1243 	uint32_t data_in_sz = 0;
1244 
1245 	data_in_sz = job->request_payload.payload_len;
1246 
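	/*
	 * Build a status snapshot sized for the maximum number of HDBs;
	 * the structure already contains one entry, hence the minus one.
	 */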
1247 	length = (sizeof(*hbd_status) + ((MPI3MR_MAX_NUM_HDB - 1) *
1248 		    sizeof(*hbd_status_entry)));
1249 	hbd_status = kmalloc(length, GFP_KERNEL);
1250 	if (!hbd_status)
1251 		return -ENOMEM;
1252 	hbd_status_entry = &hbd_status->entry[0];
1253 
1254 	hbd_status->num_hdb_types = MPI3MR_MAX_NUM_HDB;
1255 	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
1256 		diag_buffer = &mrioc->diag_buffers[i];
1257 		hbd_status_entry->buf_type = diag_buffer->type;
1258 		hbd_status_entry->status = diag_buffer->status;
1259 		hbd_status_entry->trigger_type = diag_buffer->trigger_type;
1260 		memcpy(&hbd_status_entry->trigger_data,
1261 		    &diag_buffer->trigger_data,
1262 		    sizeof(hbd_status_entry->trigger_data));
1263 		hbd_status_entry->size = (diag_buffer->size / 1024);
1264 		hbd_status_entry++;
1265 	}
1266 	hbd_status->element_trigger_format =
1267 		MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA;
1268 
1269 	if (data_in_sz < 4) {
1270 		dprint_bsg_err(mrioc, "%s: invalid size passed\n", __func__);
1271 		rval = -EINVAL;
1272 		goto out;
1273 	}
1274 	min_length = min(data_in_sz, length);
1275 	if (job->request_payload.payload_len >= min_length) {
1276 		sg_copy_from_buffer(job->request_payload.sg_list,
1277 				    job->request_payload.sg_cnt,
1278 				    hbd_status, min_length);
1279 		rval = 0;
1280 	}
1281 out:
1282 	kfree(hbd_status);
1283 	return rval;
1284 }
1285 
1286 
1287 /**
1288  * mpi3mr_enable_logdata - Handler for log data enable
1289  * @mrioc: Adapter instance reference
1290  * @job: BSG job reference
1291  *
1292  * This function enables log data caching in the driver if not
1293  * already enabled and returns the maximum number of log data
1294  * entries that can be cached in the driver.
1295  *
1296  * Return: 0 on success and proper error codes on failure
1297  */
1298 static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc,
1299 	struct bsg_job *job)
1300 {
1301 	struct mpi3mr_logdata_enable logdata_enable;
1302 
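	/*
	 * On the first enable, size each cached entry to hold the event
	 * data portion of an event reply plus a BSG-specific header, and
	 * allocate room for the maximum number of entries.
	 */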
1303 	if (!mrioc->logdata_buf) {
1304 		mrioc->logdata_entry_sz =
1305 		    (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4))
1306 		    + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ;
1307 		mrioc->logdata_buf_idx = 0;
1308 		mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES,
1309 		    mrioc->logdata_entry_sz, GFP_KERNEL);
1310 
1311 		if (!mrioc->logdata_buf)
1312 			return -ENOMEM;
1313 	}
1314 
1315 	memset(&logdata_enable, 0, sizeof(logdata_enable));
1316 	logdata_enable.max_entries =
1317 	    MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
1318 	if (job->request_payload.payload_len >= sizeof(logdata_enable)) {
1319 		sg_copy_from_buffer(job->request_payload.sg_list,
1320 				    job->request_payload.sg_cnt,
1321 				    &logdata_enable, sizeof(logdata_enable));
1322 		return 0;
1323 	}
1324 
1325 	return -EINVAL;
1326 }
1327 /**
1328  * mpi3mr_get_logdata - Handler for get log data
1329  * @mrioc: Adapter instance reference
1330  * @job: BSG job pointer
1331  * This function copies the log data entries to the user buffer
1332  * when log caching is enabled in the driver.
1333  *
1334  * Return: 0 on success and proper error codes on failure
1335  */
1336 static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc,
1337 	struct bsg_job *job)
1338 {
1339 	u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz;
1340 
1341 	if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz))
1342 		return -EINVAL;
1343 
1344 	num_entries = job->request_payload.payload_len / entry_sz;
1345 	if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES)
1346 		num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
1347 	sz = num_entries * entry_sz;
1348 
1349 	if (job->request_payload.payload_len >= sz) {
1350 		sg_copy_from_buffer(job->request_payload.sg_list,
1351 				    job->request_payload.sg_cnt,
1352 				    mrioc->logdata_buf, sz);
1353 		return 0;
1354 	}
1355 	return -EINVAL;
1356 }
1357 
1358 /**
1359  * mpi3mr_bsg_pel_enable - Handler for PEL enable driver
1360  * @mrioc: Adapter instance reference
1361  * @job: BSG job pointer
1362  *
1363  * This function is the handler for the PEL enable request.
1364  * It validates the application given class and locale and, if
1365  * required, aborts the existing PEL wait request and/or issues
1366  * a new PEL wait request to the firmware and returns.
1367  *
1368  * Return: 0 on success and proper error codes on failure.
1369  */
1370 static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
1371 				  struct bsg_job *job)
1372 {
1373 	long rval = -EINVAL;
1374 	struct mpi3mr_bsg_out_pel_enable pel_enable;
1375 	u8 issue_pel_wait;
1376 	u8 tmp_class;
1377 	u16 tmp_locale;
1378 
1379 	if (job->request_payload.payload_len != sizeof(pel_enable)) {
1380 		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
1381 		    __func__);
1382 		return rval;
1383 	}
1384 
1385 	if (mrioc->unrecoverable) {
1386 		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
1387 			       __func__);
1388 		return -EFAULT;
1389 	}
1390 
1391 	if (mrioc->reset_in_progress) {
1392 		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
1393 		return -EAGAIN;
1394 	}
1395 
1396 	if (mrioc->stop_bsgs) {
1397 		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
1398 		return -EAGAIN;
1399 	}
1400 
1401 	sg_copy_to_buffer(job->request_payload.sg_list,
1402 			  job->request_payload.sg_cnt,
1403 			  &pel_enable, sizeof(pel_enable));
1404 
1405 	if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) {
1406 		dprint_bsg_err(mrioc, "%s: out of range class %d sent\n",
1407 			__func__, pel_enable.pel_class);
1408 		rval = 0;
1409 		goto out;
1410 	}
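	/*
	 * If PEL is already enabled with a class at least as permissive
	 * and a locale superset of the request, no new wait is needed;
	 * otherwise merge the settings, abort the current PEL wait and
	 * issue a new one.
	 */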
1411 	if (!mrioc->pel_enabled)
1412 		issue_pel_wait = 1;
1413 	else {
1414 		if ((mrioc->pel_class <= pel_enable.pel_class) &&
1415 		    !((mrioc->pel_locale & pel_enable.pel_locale) ^
1416 		      pel_enable.pel_locale)) {
1417 			issue_pel_wait = 0;
1418 			rval = 0;
1419 		} else {
1420 			pel_enable.pel_locale |= mrioc->pel_locale;
1421 
1422 			if (mrioc->pel_class < pel_enable.pel_class)
1423 				pel_enable.pel_class = mrioc->pel_class;
1424 
1425 			rval = mpi3mr_bsg_pel_abort(mrioc);
1426 			if (rval) {
1427 				dprint_bsg_err(mrioc,
1428 				    "%s: pel_abort failed, status(%ld)\n",
1429 				    __func__, rval);
1430 				goto out;
1431 			}
1432 			issue_pel_wait = 1;
1433 		}
1434 	}
1435 	if (issue_pel_wait) {
1436 		tmp_class = mrioc->pel_class;
1437 		tmp_locale = mrioc->pel_locale;
1438 		mrioc->pel_class = pel_enable.pel_class;
1439 		mrioc->pel_locale = pel_enable.pel_locale;
1440 		mrioc->pel_enabled = 1;
1441 		rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL);
1442 		if (rval) {
1443 			mrioc->pel_class = tmp_class;
1444 			mrioc->pel_locale = tmp_locale;
1445 			mrioc->pel_enabled = 0;
1446 			dprint_bsg_err(mrioc,
1447 			    "%s: pel get sequence number failed, status(%ld)\n",
1448 			    __func__, rval);
1449 		}
1450 	}
1451 
1452 out:
1453 	return rval;
1454 }
1455 /**
1456  * mpi3mr_get_all_tgt_info - Get all target information
1457  * @mrioc: Adapter instance reference
1458  * @job: BSG job reference
1459  *
1460  * This function copies the driver managed target devices device
1461  * handle, persistent ID, bus ID and taret ID to the user
1462  * provided buffer for the specific controller. This function
1463  * also provides the number of devices managed by the driver for
1464  * the specific controller.
1465  *
1466  * Return: 0 on success and proper error codes on failure
1467  */
1468 static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
1469 	struct bsg_job *job)
1470 {
1471 	u16 num_devices = 0, i = 0, size;
1472 	unsigned long flags;
1473 	struct mpi3mr_tgt_dev *tgtdev;
1474 	struct mpi3mr_device_map_info *devmap_info = NULL;
1475 	struct mpi3mr_all_tgt_info *alltgt_info = NULL;
1476 	uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0;
1477 
1478 	if (job->request_payload.payload_len < sizeof(u32)) {
1479 		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
1480 		    __func__);
1481 		return -EINVAL;
1482 	}
1483 
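	/*
	 * Count the devices under the lock first, then allocate and fill
	 * the map; the list may change in between, so the fill loop is
	 * bounded by the earlier count.
	 */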
1484 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
1485 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
1486 		num_devices++;
1487 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
1488 
1489 	if ((job->request_payload.payload_len <= sizeof(u64)) ||
1490 		list_empty(&mrioc->tgtdev_list)) {
1491 		sg_copy_from_buffer(job->request_payload.sg_list,
1492 				    job->request_payload.sg_cnt,
1493 				    &num_devices, sizeof(num_devices));
1494 		return 0;
1495 	}
1496 
1497 	kern_entrylen = num_devices * sizeof(*devmap_info);
1498 	size = sizeof(u64) + kern_entrylen;
1499 	alltgt_info = kzalloc(size, GFP_KERNEL);
1500 	if (!alltgt_info)
1501 		return -ENOMEM;
1502 
1503 	devmap_info = alltgt_info->dmi;
1504 	memset((u8 *)devmap_info, 0xFF, kern_entrylen);
1505 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
1506 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
1507 		if (i < num_devices) {
1508 			devmap_info[i].handle = tgtdev->dev_handle;
1509 			devmap_info[i].perst_id = tgtdev->perst_id;
1510 			if (tgtdev->host_exposed && tgtdev->starget) {
1511 				devmap_info[i].target_id = tgtdev->starget->id;
1512 				devmap_info[i].bus_id =
1513 				    tgtdev->starget->channel;
1514 			}
1515 			i++;
1516 		}
1517 	}
1518 	num_devices = i;
1519 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
1520 
1521 	alltgt_info->num_devices = num_devices;
1522 
1523 	usr_entrylen = (job->request_payload.payload_len - sizeof(u64)) /
1524 		sizeof(*devmap_info);
1525 	usr_entrylen *= sizeof(*devmap_info);
1526 	min_entrylen = min(usr_entrylen, kern_entrylen);
1527 
1528 	sg_copy_from_buffer(job->request_payload.sg_list,
1529 			    job->request_payload.sg_cnt,
1530 			    alltgt_info, (min_entrylen + sizeof(u64)));
1531 	kfree(alltgt_info);
1532 	return 0;
1533 }
1534 /**
1535  * mpi3mr_get_change_count - Get topology change count
1536  * @mrioc: Adapter instance reference
1537  * @job: BSG job reference
1538  *
1539  * This function copies the topology change count provided by the
1540  * driver in events and cached in the driver to the user
1541  * provided buffer for the specific controller.
1542  *
1543  * Return: 0 on success and proper error codes on failure
1544  */
1545 static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc,
1546 	struct bsg_job *job)
1547 {
1548 	struct mpi3mr_change_count chgcnt;
1549 
1550 	memset(&chgcnt, 0, sizeof(chgcnt));
1551 	chgcnt.change_count = mrioc->change_count;
1552 	if (job->request_payload.payload_len >= sizeof(chgcnt)) {
1553 		sg_copy_from_buffer(job->request_payload.sg_list,
1554 				    job->request_payload.sg_cnt,
1555 				    &chgcnt, sizeof(chgcnt));
1556 		return 0;
1557 	}
1558 	return -EINVAL;
1559 }
1560 
1561 /**
1562  * mpi3mr_bsg_adp_reset - Issue controller reset
1563  * @mrioc: Adapter instance reference
1564  * @job: BSG job reference
1565  *
1566  * This function identifies the user provided reset type and
1567  * issues the appropriate reset to the controller, waits for it
1568  * to complete, reinitializes the controller and then returns.
1569  *
1570  * Return: 0 on success and proper error codes on failure
1571  */
1572 static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc,
1573 	struct bsg_job *job)
1574 {
1575 	long rval = -EINVAL;
1576 	u8 save_snapdump;
1577 	struct mpi3mr_bsg_adp_reset adpreset;
1578 
1579 	if (job->request_payload.payload_len !=
1580 			sizeof(adpreset)) {
1581 		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
1582 		    __func__);
1583 		goto out;
1584 	}
1585 
1586 	if (mrioc->unrecoverable || mrioc->block_on_pci_err)
1587 		return -EINVAL;
1588 
1589 	sg_copy_to_buffer(job->request_payload.sg_list,
1590 			  job->request_payload.sg_cnt,
1591 			  &adpreset, sizeof(adpreset));
1592 
1593 	switch (adpreset.reset_type) {
1594 	case MPI3MR_BSG_ADPRESET_SOFT:
1595 		save_snapdump = 0;
1596 		break;
1597 	case MPI3MR_BSG_ADPRESET_DIAG_FAULT:
1598 		save_snapdump = 1;
1599 		break;
1600 	default:
1601 		dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n",
1602 		    __func__, adpreset.reset_type);
1603 		goto out;
1604 	}
1605 
1606 	rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP,
1607 	    save_snapdump);
1608 
1609 	if (rval)
1610 		dprint_bsg_err(mrioc,
1611 		    "%s: reset handler returned error(%ld) for reset type %d\n",
1612 		    __func__, rval, adpreset.reset_type);
1613 out:
1614 	return rval;
1615 }
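
/*
 * Reset type mapping above, in brief: MPI3MR_BSG_ADPRESET_SOFT requests
 * a plain soft reset with no snapdump, while
 * MPI3MR_BSG_ADPRESET_DIAG_FAULT asks mpi3mr_soft_reset_handler() to
 * save a snapdump before recovering the controller.
 */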
1616 
1617 /**
1618  * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler
1619  * @mrioc: Adapter instance reference
1620  * @job: BSG job reference
1621  *
1622  * This function provides adapter information for the given
1623  * controller
1624  *
1625  * Return: 0 on success and proper error codes on failure
1626  */
1627 static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc,
1628 	struct bsg_job *job)
1629 {
1630 	enum mpi3mr_iocstate ioc_state;
1631 	struct mpi3mr_bsg_in_adpinfo adpinfo;
1632 
1633 	memset(&adpinfo, 0, sizeof(adpinfo));
1634 	adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY;
1635 	adpinfo.pci_dev_id = mrioc->pdev->device;
1636 	adpinfo.pci_dev_hw_rev = mrioc->pdev->revision;
1637 	adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device;
1638 	adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor;
1639 	adpinfo.pci_bus = mrioc->pdev->bus->number;
1640 	adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn);
1641 	adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn);
1642 	adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus);
1643 	adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION;
1644 
1645 	ioc_state = mpi3mr_get_iocstate(mrioc);
1646 	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
1647 		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
1648 	else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
1649 		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
1650 	else if (ioc_state == MRIOC_STATE_FAULT)
1651 		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
1652 	else
1653 		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
1654 
1655 	memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info,
1656 	    sizeof(adpinfo.driver_info));
1657 
1658 	if (job->request_payload.payload_len >= sizeof(adpinfo)) {
1659 		sg_copy_from_buffer(job->request_payload.sg_list,
1660 				    job->request_payload.sg_cnt,
1661 				    &adpinfo, sizeof(adpinfo));
1662 		return 0;
1663 	}
1664 	return -EINVAL;
1665 }
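
/*
 * Illustrative reply layout (PCI address hypothetical): for a
 * controller at 0000:41:00.0 the handler would report pci_seg_id 0,
 * pci_bus 0x41, pci_dev 0 and pci_func 0, with adp_state derived from
 * the IOC state checks above.
 */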
1666 
1667 /**
1668  * mpi3mr_bsg_process_drv_cmds - Driver Command handler
1669  * @job: BSG job reference
1670  *
1671  * This function is the top level handler for driver commands;
1672  * it does basic validation of the buffer, identifies the
1673  * opcode, and dispatches to the correct sub handler.
1674  *
1675  * Return: 0 on success and proper error codes on failure
1676  */
1677 static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
1678 {
1679 	long rval = -EINVAL;
1680 	struct mpi3mr_ioc *mrioc = NULL;
1681 	struct mpi3mr_bsg_packet *bsg_req = NULL;
1682 	struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL;
1683 
1684 	bsg_req = job->request;
1685 	drvrcmd = &bsg_req->cmd.drvrcmd;
1686 
1687 	mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id);
1688 	if (!mrioc)
1689 		return -ENODEV;
1690 
1691 	if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) {
1692 		rval = mpi3mr_bsg_populate_adpinfo(mrioc, job);
1693 		return rval;
1694 	}
1695 
1696 	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
1697 		return -ERESTARTSYS;
1698 
1699 	switch (drvrcmd->opcode) {
1700 	case MPI3MR_DRVBSG_OPCODE_ADPRESET:
1701 		rval = mpi3mr_bsg_adp_reset(mrioc, job);
1702 		break;
1703 	case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO:
1704 		rval = mpi3mr_get_all_tgt_info(mrioc, job);
1705 		break;
1706 	case MPI3MR_DRVBSG_OPCODE_GETCHGCNT:
1707 		rval = mpi3mr_get_change_count(mrioc, job);
1708 		break;
1709 	case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE:
1710 		rval = mpi3mr_enable_logdata(mrioc, job);
1711 		break;
1712 	case MPI3MR_DRVBSG_OPCODE_GETLOGDATA:
1713 		rval = mpi3mr_get_logdata(mrioc, job);
1714 		break;
1715 	case MPI3MR_DRVBSG_OPCODE_PELENABLE:
1716 		rval = mpi3mr_bsg_pel_enable(mrioc, job);
1717 		break;
1718 	case MPI3MR_DRVBSG_OPCODE_QUERY_HDB:
1719 		rval = mpi3mr_bsg_query_hdb(mrioc, job);
1720 		break;
1721 	case MPI3MR_DRVBSG_OPCODE_REPOST_HDB:
1722 		rval = mpi3mr_bsg_repost_hdb(mrioc, job);
1723 		break;
1724 	case MPI3MR_DRVBSG_OPCODE_UPLOAD_HDB:
1725 		rval = mpi3mr_bsg_upload_hdb(mrioc, job);
1726 		break;
1727 	case MPI3MR_DRVBSG_OPCODE_REFRESH_HDB_TRIGGERS:
1728 		rval = mpi3mr_bsg_refresh_hdb_triggers(mrioc, job);
1729 		break;
1730 	case MPI3MR_DRVBSG_OPCODE_UNKNOWN:
1731 	default:
1732 		pr_err("%s: unsupported driver command opcode %d\n",
1733 		    MPI3MR_DRIVER_NAME, drvrcmd->opcode);
1734 		break;
1735 	}
1736 	mutex_unlock(&mrioc->bsg_cmds.mutex);
1737 	return rval;
1738 }
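
/*
 * Note on the dispatch above: ADPINFO is answered before taking
 * bsg_cmds.mutex so applications can query adapter state even while
 * another BSG command holds the mutex; every other opcode is
 * serialized through the interruptible mutex acquisition.
 */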
1739 
1740 /**
1741  * mpi3mr_total_num_ioctl_sges - Count number of SGEs required
1742  * @drv_bufs: DMA address of the buffers to be placed in sgl
1743  * @bufcnt: Number of DMA buffers
1744  *
1745  * This function returns the total number of data SGEs required,
1746  * including zero length SGEs and excluding the management request
1747  * and response buffers, for the given list of data buffer
1748  * descriptors.
1749  *
1750  * Return: Number of SGE elements needed
1751  */
1752 static inline u16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_buf_map *drv_bufs,
1753 					      u8 bufcnt)
1754 {
1755 	u16 i, sge_count = 0;
1756 
1757 	for (i = 0; i < bufcnt; i++, drv_bufs++) {
1758 		if (drv_bufs->data_dir == DMA_NONE ||
1759 		    drv_bufs->kern_buf)
1760 			continue;
1761 		sge_count += drv_bufs->num_dma_desc;
1762 		if (!drv_bufs->num_dma_desc)
1763 			sge_count++;
1764 	}
1765 	return sge_count;
1766 }
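
/*
 * Worked example (illustrative, not driver data): with buf0 mapping to
 * 3 DMA descriptors, buf1 a zero length buffer (num_dma_desc == 0) and
 * buf2 mapping to 1 descriptor, the loop counts 3 + 1 + 1 = 5 SGEs.
 * DMA_NONE buffers and kernel-buffer (kern_buf) entries are skipped as
 * they are carried outside the data SGL.
 */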
1767 
1768 /**
1769  * mpi3mr_bsg_build_sgl - SGL construction for MPI commands
1770  * @mrioc: Adapter instance reference
1771  * @mpi_req: MPI request
1772  * @sgl_offset: offset to start sgl in the MPI request
1773  * @drv_bufs: DMA address of the buffers to be placed in sgl
1774  * @bufcnt: Number of DMA buffers
1775  * @is_rmc: Does the buffer list have a management command buffer
1776  * @is_rmr: Does the buffer list have a management response buffer
1777  * @num_datasges: Number of data buffers in the list
1778  *
1779  * This function places the DMA address of the given buffers in
1780  * proper format as SGEs in the given MPI request.
1781  *
1782  * Return: 0 on success, -1 on failure
1783  */
1784 static int mpi3mr_bsg_build_sgl(struct mpi3mr_ioc *mrioc, u8 *mpi_req,
1785 				u32 sgl_offset, struct mpi3mr_buf_map *drv_bufs,
1786 				u8 bufcnt, u8 is_rmc, u8 is_rmr, u8 num_datasges)
1787 {
1788 	struct mpi3_request_header *mpi_header =
1789 		(struct mpi3_request_header *)mpi_req;
1790 	u8 *sgl = (mpi_req + sgl_offset), count = 0;
1791 	struct mpi3_mgmt_passthrough_request *rmgmt_req =
1792 	    (struct mpi3_mgmt_passthrough_request *)mpi_req;
1793 	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
1794 	u8 flag, sgl_flags, sgl_flag_eob, sgl_flags_last, last_chain_sgl_flag;
1795 	u16 available_sges, i, sges_needed;
1796 	u32 sge_element_size = sizeof(struct mpi3_sge_common);
1797 	bool chain_used = false;
1798 
1799 	sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
1800 		MPI3_SGE_FLAGS_DLAS_SYSTEM;
1801 	sgl_flag_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER;
1802 	sgl_flags_last = sgl_flag_eob | MPI3_SGE_FLAGS_END_OF_LIST;
1803 	last_chain_sgl_flag = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
1804 	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
1805 
1806 	sges_needed = mpi3mr_total_num_ioctl_sges(drv_bufs, bufcnt);
1807 
1808 	if (is_rmc) {
1809 		mpi3mr_add_sg_single(&rmgmt_req->command_sgl,
1810 		    sgl_flags_last, drv_buf_iter->kern_buf_len,
1811 		    drv_buf_iter->kern_buf_dma);
1812 		sgl = (u8 *)drv_buf_iter->kern_buf +
1813 			drv_buf_iter->bsg_buf_len;
1814 		available_sges = (drv_buf_iter->kern_buf_len -
1815 		    drv_buf_iter->bsg_buf_len) / sge_element_size;
1816 
1817 		if (sges_needed > available_sges)
1818 			return -1;
1819 
1820 		chain_used = true;
1821 		drv_buf_iter++;
1822 		count++;
1823 		if (is_rmr) {
1824 			mpi3mr_add_sg_single(&rmgmt_req->response_sgl,
1825 			    sgl_flags_last, drv_buf_iter->kern_buf_len,
1826 			    drv_buf_iter->kern_buf_dma);
1827 			drv_buf_iter++;
1828 			count++;
1829 		} else
1830 			mpi3mr_build_zero_len_sge(
1831 			    &rmgmt_req->response_sgl);
1832 		if (num_datasges) {
1833 			i = 0;
1834 			goto build_sges;
1835 		}
1836 	} else {
1837 		if (sgl_offset >= MPI3MR_ADMIN_REQ_FRAME_SZ)
1838 			return -1;
1839 		available_sges = (MPI3MR_ADMIN_REQ_FRAME_SZ - sgl_offset) /
1840 		    sge_element_size;
1841 		if (!available_sges)
1842 			return -1;
1843 	}
1844 	if (!num_datasges) {
1845 		mpi3mr_build_zero_len_sge(sgl);
1846 		return 0;
1847 	}
1848 	if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
1849 		if ((sges_needed > 2) || (sges_needed > available_sges))
1850 			return -1;
1851 		for (; count < bufcnt; count++, drv_buf_iter++) {
1852 			if (drv_buf_iter->data_dir == DMA_NONE ||
1853 			    !drv_buf_iter->num_dma_desc)
1854 				continue;
1855 			mpi3mr_add_sg_single(sgl, sgl_flags_last,
1856 					     drv_buf_iter->dma_desc[0].size,
1857 					     drv_buf_iter->dma_desc[0].dma_addr);
1858 			sgl += sge_element_size;
1859 		}
1860 		return 0;
1861 	}
1862 	i = 0;
1863 
1864 build_sges:
1865 	for (; count < bufcnt; count++, drv_buf_iter++) {
1866 		if (drv_buf_iter->data_dir == DMA_NONE)
1867 			continue;
1868 		if (!drv_buf_iter->num_dma_desc) {
1869 			if (chain_used && !available_sges)
1870 				return -1;
1871 			if (!chain_used && (available_sges == 1) &&
1872 			    (sges_needed > 1))
1873 				goto setup_chain;
1874 			flag = sgl_flag_eob;
1875 			if (num_datasges == 1)
1876 				flag = sgl_flags_last;
1877 			mpi3mr_add_sg_single(sgl, flag, 0, 0);
1878 			sgl += sge_element_size;
1879 			sges_needed--;
1880 			available_sges--;
1881 			num_datasges--;
1882 			continue;
1883 		}
1884 		for (; i < drv_buf_iter->num_dma_desc; i++) {
1885 			if (chain_used && !available_sges)
1886 				return -1;
1887 			if (!chain_used && (available_sges == 1) &&
1888 			    (sges_needed > 1))
1889 				goto setup_chain;
1890 			flag = sgl_flags;
1891 			if (i == (drv_buf_iter->num_dma_desc - 1)) {
1892 				if (num_datasges == 1)
1893 					flag = sgl_flags_last;
1894 				else
1895 					flag = sgl_flag_eob;
1896 			}
1897 
1898 			mpi3mr_add_sg_single(sgl, flag,
1899 					     drv_buf_iter->dma_desc[i].size,
1900 					     drv_buf_iter->dma_desc[i].dma_addr);
1901 			sgl += sge_element_size;
1902 			available_sges--;
1903 			sges_needed--;
1904 		}
1905 		num_datasges--;
1906 		i = 0;
1907 	}
1908 	return 0;
1909 
1910 setup_chain:
1911 	available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
1912 	if (sges_needed > available_sges)
1913 		return -1;
1914 	mpi3mr_add_sg_single(sgl, last_chain_sgl_flag,
1915 			     (sges_needed * sge_element_size),
1916 			     mrioc->ioctl_chain_sge.dma_addr);
1917 	memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
1918 	sgl = (u8 *)mrioc->ioctl_chain_sge.addr;
1919 	chain_used = true;
1920 	goto build_sges;
1921 }
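
/*
 * Chain layout built above, sketched:
 *
 *	request frame: [SGE0][SGE1]...[LAST_CHAIN -> ioctl_chain_sge]
 *	chain buffer:  [SGEn][SGEn+1]...[last SGE (END_OF_LIST)]
 *
 * When only one in-frame slot remains but more SGEs are needed, that
 * slot becomes a LAST_CHAIN element pointing at the pre-allocated
 * ioctl_chain_sge buffer and the remaining simple SGEs continue there.
 */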
1922 
1923 /**
1924  * mpi3mr_get_nvme_data_fmt - returns the NVMe data format
1925  * @nvme_encap_request: NVMe encapsulated MPI request
1926  *
1927  * This function returns the type of the data format specified
1928  * in the user provided NVMe command in the NVMe encapsulated request.
1929  *
1930  * Return: Data format of the NVMe command (PRP/SGL etc)
1931  */
1932 static unsigned int mpi3mr_get_nvme_data_fmt(
1933 	struct mpi3_nvme_encapsulated_request *nvme_encap_request)
1934 {
1935 	u8 format = 0;
1936 
1937 	format = ((nvme_encap_request->command[0] & 0xc000) >> 14);
1938 	return format;
1940 }
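
/*
 * Reading of the extraction above: bits 15:14 of the first dword of the
 * NVMe command select the data format. For example, command[0] ==
 * 0x00004000 yields ((0x4000 & 0xc000) >> 14) == 1, an SGL format,
 * while 0 in that field selects PRP (assuming the usual
 * MPI3MR_NVME_DATA_FORMAT_* encoding with PRP == 0).
 */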
1941 
1942 /**
1943  * mpi3mr_build_nvme_sgl - SGL constructor for NVME
1944  *				   encapsulated request
1945  * @mrioc: Adapter instance reference
1946  * @nvme_encap_request: NVMe encapsulated MPI request
1947  * @drv_bufs: DMA address of the buffers to be placed in sgl
1948  * @bufcnt: Number of DMA buffers
1949  *
1950  * This function places the DMA address of the given buffers in
1951  * proper format as SGEs in the given NVMe encapsulated request.
1952  *
1953  * Return: 0 on success, -1 on failure
1954  */
1955 static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
1956 	struct mpi3_nvme_encapsulated_request *nvme_encap_request,
1957 	struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
1958 {
1959 	struct mpi3mr_nvme_pt_sge *nvme_sgl;
1960 	__le64 sgl_dma;
1961 	u8 count;
1962 	size_t length = 0;
1963 	u16 available_sges = 0, i;
1964 	u32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge);
1965 	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
1966 	u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
1967 			    mrioc->facts.sge_mod_shift) << 32);
1968 	u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
1969 			  mrioc->facts.sge_mod_shift) << 32;
1970 	u32 size;
1971 
1972 	nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
1973 	    ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);
1974 
1975 	/*
1976 	 * Not all commands require a data transfer. If no data, just return
1977 	 * without constructing any sgl.
1978 	 */
1979 	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
1980 		if (drv_buf_iter->data_dir == DMA_NONE)
1981 			continue;
1982 		length = drv_buf_iter->kern_buf_len;
1983 		break;
1984 	}
1985 	if (!length || !drv_buf_iter->num_dma_desc)
1986 		return 0;
1987 
1988 	if (drv_buf_iter->num_dma_desc == 1) {
1989 		available_sges = 1;
1990 		goto build_sges;
1991 	}
1992 
1993 	sgl_dma = cpu_to_le64(mrioc->ioctl_chain_sge.dma_addr);
1994 	if (sgl_dma & sgemod_mask) {
1995 		dprint_bsg_err(mrioc,
1996 		    "%s: SGL chain address collides with SGE modifier\n",
1997 		    __func__);
1998 		return -1;
1999 	}
2000 
2001 	sgl_dma &= ~sgemod_mask;
2002 	sgl_dma |= sgemod_val;
2003 
2004 	memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
2005 	available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
2006 	if (available_sges < drv_buf_iter->num_dma_desc)
2007 		return -1;
2008 	memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
2009 	nvme_sgl->base_addr = sgl_dma;
2010 	size = drv_buf_iter->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge);
2011 	nvme_sgl->length = cpu_to_le32(size);
2012 	nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT;
2013 	nvme_sgl = (struct mpi3mr_nvme_pt_sge *)mrioc->ioctl_chain_sge.addr;
2014 
2015 build_sges:
2016 	for (i = 0; i < drv_buf_iter->num_dma_desc; i++) {
2017 		sgl_dma = cpu_to_le64(drv_buf_iter->dma_desc[i].dma_addr);
2018 		if (sgl_dma & sgemod_mask) {
2019 			dprint_bsg_err(mrioc,
2020 				       "%s: SGL address collides with SGE modifier\n",
2021 				       __func__);
2022 			return -1;
2023 		}
2024 
2025 		sgl_dma &= ~sgemod_mask;
2026 		sgl_dma |= sgemod_val;
2027 
2028 		nvme_sgl->base_addr = sgl_dma;
2029 		nvme_sgl->length = cpu_to_le32(drv_buf_iter->dma_desc[i].size);
2030 		nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT;
2031 		nvme_sgl++;
2032 		available_sges--;
2033 	}
2034 
2035 	return 0;
2036 }
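
/*
 * A minimal sketch of the SGE modifier stamping used above, with
 * assumed (hypothetical) firmware facts sge_mod_mask = 0xf and
 * sge_mod_shift = 28:
 *
 *	u64 mask = ((u64)0xf << 28) << 32;	// sgemod_mask, bits 60:63
 *	u64 val  = ((u64)0x9 << 28) << 32;	// sgemod_val, value assumed
 *	u64 dma  = 0x0000001234567000ULL;
 *	dma = (dma & ~mask) | val;		// top nibble stamped
 *
 * The collision check rejects any DMA address that already has bits set
 * inside the modifier field, since stamping would corrupt the address.
 */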
2037 
2038 /**
2039  * mpi3mr_build_nvme_prp - PRP constructor for NVME
2040  *			       encapsulated request
2041  * @mrioc: Adapter instance reference
2042  * @nvme_encap_request: NVMe encapsulated MPI request
2043  * @drv_bufs: DMA address of the buffers to be placed in SGL
2044  * @bufcnt: Number of DMA buffers
2045  *
2046  * This function places the DMA address of the given buffers in
2047  * proper format as PRP entries in the given NVMe encapsulated
2048  * request.
2049  *
2050  * Return: 0 on success, -1 on failure
2051  */
2052 static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
2053 	struct mpi3_nvme_encapsulated_request *nvme_encap_request,
2054 	struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
2055 {
2056 	int prp_size = MPI3MR_NVME_PRP_SIZE;
2057 	__le64 *prp_entry, *prp1_entry, *prp2_entry;
2058 	__le64 *prp_page;
2059 	dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
2060 	u32 offset, entry_len, dev_pgsz;
2061 	u32 page_mask_result, page_mask;
2062 	size_t length = 0, desc_len;
2063 	u8 count;
2064 	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
2065 	u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
2066 			    mrioc->facts.sge_mod_shift) << 32);
2067 	u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
2068 			  mrioc->facts.sge_mod_shift) << 32;
2069 	u16 dev_handle = nvme_encap_request->dev_handle;
2070 	struct mpi3mr_tgt_dev *tgtdev;
2071 	u16 desc_count = 0;
2072 
2073 	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
2074 	if (!tgtdev) {
2075 		dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n",
2076 			__func__, dev_handle);
2077 		return -1;
2078 	}
2079 
2080 	if (tgtdev->dev_spec.pcie_inf.pgsz == 0) {
2081 		dprint_bsg_err(mrioc,
2082 		    "%s: NVMe device page size is zero for handle 0x%04x\n",
2083 		    __func__, dev_handle);
2084 		mpi3mr_tgtdev_put(tgtdev);
2085 		return -1;
2086 	}
2087 
2088 	dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz);
2089 	mpi3mr_tgtdev_put(tgtdev);
2090 	page_mask = dev_pgsz - 1;
2091 
2092 	if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE) {
2093 		dprint_bsg_err(mrioc,
2094 			       "%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n",
2095 			       __func__, dev_pgsz, MPI3MR_IOCTL_SGE_SIZE, dev_handle);
2096 		return -1;
2097 	}
2098 
2099 	if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz) {
2100 		dprint_bsg_err(mrioc,
2101 			       "%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n",
2102 			       __func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle);
2103 		return -1;
2104 	}
2105 
2106 	/*
2107 	 * Not all commands require a data transfer. If no data, just return
2108 	 * without constructing any PRP.
2109 	 */
2110 	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
2111 		if (drv_buf_iter->data_dir == DMA_NONE)
2112 			continue;
2113 		length = drv_buf_iter->kern_buf_len;
2114 		break;
2115 	}
2116 
2117 	if (!length || !drv_buf_iter->num_dma_desc)
2118 		return 0;
2119 
2120 	for (count = 0; count < drv_buf_iter->num_dma_desc; count++) {
2121 		dma_addr = drv_buf_iter->dma_desc[count].dma_addr;
2122 		if (dma_addr & page_mask) {
2123 			dprint_bsg_err(mrioc,
2124 				       "%s:dma_addr %pad is not aligned with page size 0x%x\n",
2125 				       "%s: dma_addr %pad is not aligned with page size 0x%x\n",
2126 			return -1;
2127 		}
2128 	}
2129 
2130 	dma_addr = drv_buf_iter->dma_desc[0].dma_addr;
2131 	desc_len = drv_buf_iter->dma_desc[0].size;
2132 
2133 	mrioc->prp_sz = 0;
2134 	mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev,
2135 	    dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL);
2136 
2137 	if (!mrioc->prp_list_virt)
2138 		return -1;
2139 	mrioc->prp_sz = dev_pgsz;
2140 
2141 	/*
2142 	 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
2143 	 * PRP1 is located at a 24 byte offset from the start of the NVMe
2144 	 * command.  Then set the current PRP entry pointer to PRP1.
2145 	 */
2146 	prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
2147 	    MPI3MR_NVME_CMD_PRP1_OFFSET);
2148 	prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
2149 	    MPI3MR_NVME_CMD_PRP2_OFFSET);
2150 	prp_entry = prp1_entry;
2151 	/*
2152 	 * For the PRP entries, use the specially allocated buffer of
2153 	 * contiguous memory.
2154 	 */
2155 	prp_page = (__le64 *)mrioc->prp_list_virt;
2156 	prp_page_dma = mrioc->prp_list_dma;
2157 
2158 	/*
2159 	 * Check if we are within 1 entry of a page boundary; we don't
2160 	 * want our first entry to be a PRP List entry.
2161 	 */
2162 	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
2163 	if (!page_mask_result) {
2164 		dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n",
2165 		    __func__);
2166 		goto err_out;
2167 	}
2168 
2169 	/*
2170 	 * Set PRP physical pointer, which initially points to the current PRP
2171 	 * DMA memory page.
2172 	 */
2173 	prp_entry_dma = prp_page_dma;
2174 
2176 	/* Loop while the length is not zero. */
2177 	while (length) {
2178 		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
2179 		if (!page_mask_result && (length > dev_pgsz)) {
2180 			dprint_bsg_err(mrioc,
2181 			    "%s: single PRP page is not sufficient\n",
2182 			    __func__);
2183 			goto err_out;
2184 		}
2185 
2186 		/* Need to handle if entry will be part of a page. */
2187 		offset = dma_addr & page_mask;
2188 		entry_len = dev_pgsz - offset;
2189 
2190 		if (prp_entry == prp1_entry) {
2191 			/*
2192 			 * Must fill in the first PRP pointer (PRP1) before
2193 			 * moving on.
2194 			 */
2195 			*prp1_entry = cpu_to_le64(dma_addr);
2196 			if (*prp1_entry & sgemod_mask) {
2197 				dprint_bsg_err(mrioc,
2198 				    "%s: PRP1 address collides with SGE modifier\n",
2199 				    __func__);
2200 				goto err_out;
2201 			}
2202 			*prp1_entry &= ~sgemod_mask;
2203 			*prp1_entry |= sgemod_val;
2204 
2205 			/*
2206 			 * Now point to the second PRP entry within the
2207 			 * command (PRP2).
2208 			 */
2209 			prp_entry = prp2_entry;
2210 		} else if (prp_entry == prp2_entry) {
2211 			/*
2212 			 * Should the PRP2 entry be a PRP List pointer or just
2213 			 * a regular PRP pointer?  If there is more than one
2214 			 * more page of data, must use a PRP List pointer.
2215 			 */
2216 			if (length > dev_pgsz) {
2217 				/*
2218 				 * PRP2 will contain a PRP List pointer because
2219 				 * more PRP's are needed with this command. The
2220 				 * list will start at the beginning of the
2221 				 * contiguous buffer.
2222 				 */
2223 				*prp2_entry = cpu_to_le64(prp_entry_dma);
2224 				if (*prp2_entry & sgemod_mask) {
2225 					dprint_bsg_err(mrioc,
2226 					    "%s: PRP list address collides with SGE modifier\n",
2227 					    __func__);
2228 					goto err_out;
2229 				}
2230 				*prp2_entry &= ~sgemod_mask;
2231 				*prp2_entry |= sgemod_val;
2232 
2233 				/*
2234 				 * The next PRP Entry will be the start of the
2235 				 * first PRP List.
2236 				 */
2237 				prp_entry = prp_page;
2238 				continue;
2239 			} else {
2240 				/*
2241 				 * After this, the PRP Entries are complete.
2242 				 * This command uses 2 PRP's and no PRP list.
2243 				 */
2244 				*prp2_entry = cpu_to_le64(dma_addr);
2245 				if (*prp2_entry & sgemod_mask) {
2246 					dprint_bsg_err(mrioc,
2247 					    "%s: PRP2 collides with SGE modifier\n",
2248 					    __func__);
2249 					goto err_out;
2250 				}
2251 				*prp2_entry &= ~sgemod_mask;
2252 				*prp2_entry |= sgemod_val;
2253 			}
2254 		} else {
2255 			/*
2256 			 * Put entry in list and bump the addresses.
2257 			 *
2258 			 * After PRP1 and PRP2 are filled in, this will fill in
2259 			 * all remaining PRP entries in a PRP List, one per
2260 			 * each time through the loop.
2261 			 */
2262 			*prp_entry = cpu_to_le64(dma_addr);
2263 			if (*prp_entry & sgemod_mask) {
2264 				dprint_bsg_err(mrioc,
2265 				    "%s: PRP address collides with SGE modifier\n",
2266 				    __func__);
2267 				goto err_out;
2268 			}
2269 			*prp_entry &= ~sgemod_mask;
2270 			*prp_entry |= sgemod_val;
2271 			prp_entry++;
2272 			prp_entry_dma += prp_size;
2273 		}
2274 
2275 		/* decrement length accounting for last partial page. */
2276 		if (entry_len >= length) {
2277 			length = 0;
2278 		} else {
2279 			if (entry_len <= desc_len) {
2280 				dma_addr += entry_len;
2281 				desc_len -= entry_len;
2282 			}
2283 			if (!desc_len) {
2284 				if ((++desc_count) >=
2285 				   drv_buf_iter->num_dma_desc) {
2286 					dprint_bsg_err(mrioc,
2287 						       "%s: Invalid len %zd while building PRP\n",
2288 						       __func__, length);
2289 					goto err_out;
2290 				}
2291 				dma_addr =
2292 				    drv_buf_iter->dma_desc[desc_count].dma_addr;
2293 				desc_len =
2294 				    drv_buf_iter->dma_desc[desc_count].size;
2295 			}
2296 			length -= entry_len;
2297 		}
2298 	}
2299 
2300 	return 0;
2301 err_out:
2302 	if (mrioc->prp_list_virt) {
2303 		dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
2304 		    mrioc->prp_list_virt, mrioc->prp_list_dma);
2305 		mrioc->prp_list_virt = NULL;
2306 	}
2307 	return -1;
2308 }
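
/*
 * Worked example of the PRP walk above (page size and lengths assumed):
 * with a 4 KiB device page and a page-aligned 12 KiB transfer, PRP1
 * takes the first page address; since more than one page then remains,
 * PRP2 takes the DMA address of the PRP list page and the list carries
 * the two remaining page addresses. For an 8 KiB transfer PRP2 would
 * instead point directly at the second page and no list is built.
 */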
2309 
2310 /**
2311  * mpi3mr_map_data_buffer_dma - build dma descriptors for data
2312  *                              buffers
2313  * @mrioc: Adapter instance reference
2314  * @drv_buf: buffer map descriptor
2315  * @desc_count: Number of already consumed dma descriptors
2316  *
2317  * This function computes how many pre-allocated DMA descriptors
2318  * are required for the given data buffer and, if that many
2319  * descriptors are free, sets up the mapping of the scattered
2320  * DMA addresses to the given data buffer. If the data direction
2321  * of the buffer is DMA_TO_DEVICE, the actual data is copied to
2322  * the DMA buffers.
2323  *
2324  * Return: 0 on success, -1 on failure
2325  */
2326 static int mpi3mr_map_data_buffer_dma(struct mpi3mr_ioc *mrioc,
2327 				      struct mpi3mr_buf_map *drv_buf,
2328 				      u16 desc_count)
2329 {
2330 	u16 i, needed_desc = drv_buf->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE;
2331 	u32 buf_len = drv_buf->kern_buf_len, copied_len = 0;
2332 
2333 	if (drv_buf->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE)
2334 		needed_desc++;
2335 	if ((needed_desc + desc_count) > MPI3MR_NUM_IOCTL_SGE) {
2336 		dprint_bsg_err(mrioc, "%s: DMA descriptor mapping error %d:%d:%d\n",
2337 			       __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE);
2338 		return -1;
2339 	}
2340 	drv_buf->dma_desc = kzalloc(sizeof(*drv_buf->dma_desc) * needed_desc,
2341 				    GFP_KERNEL);
2342 	if (!drv_buf->dma_desc)
2343 		return -1;
2344 	for (i = 0; i < needed_desc; i++, desc_count++) {
2345 		drv_buf->dma_desc[i].addr = mrioc->ioctl_sge[desc_count].addr;
2346 		drv_buf->dma_desc[i].dma_addr =
2347 		    mrioc->ioctl_sge[desc_count].dma_addr;
2348 		if (buf_len < mrioc->ioctl_sge[desc_count].size)
2349 			drv_buf->dma_desc[i].size = buf_len;
2350 		else
2351 			drv_buf->dma_desc[i].size =
2352 			    mrioc->ioctl_sge[desc_count].size;
2353 		buf_len -= drv_buf->dma_desc[i].size;
2354 		memset(drv_buf->dma_desc[i].addr, 0,
2355 		       mrioc->ioctl_sge[desc_count].size);
2356 		if (drv_buf->data_dir == DMA_TO_DEVICE) {
2357 			memcpy(drv_buf->dma_desc[i].addr,
2358 			       drv_buf->bsg_buf + copied_len,
2359 			       drv_buf->dma_desc[i].size);
2360 			copied_len += drv_buf->dma_desc[i].size;
2361 		}
2362 	}
2363 	drv_buf->num_dma_desc = needed_desc;
2364 	return 0;
2365 }
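
/*
 * Descriptor arithmetic above, by example (sizes assumed): with an
 * MPI3MR_IOCTL_SGE_SIZE of 8 KiB, a 20 KiB kern_buf_len needs
 * 20480 / 8192 = 2 descriptors plus 1 for the 4 KiB remainder, so
 * needed_desc = 3. The request is rejected when the running desc_count
 * plus this need exceeds the MPI3MR_NUM_IOCTL_SGE pre-allocated pool.
 */
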
2366 /**
2367  * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler
2368  * @job: BSG job reference
2369  *
2370  * This function is the top level handler for the MPI pass-through
2371  * command; it does basic validation of the input data buffers,
2372  * identifies the given buffer types and the MPI command, allocates
2373  * DMAable memory for the user given buffers, constructs the SGL
2374  * properly, and passes the command to the firmware.
2375  *
2376  * Once the MPI command is completed, the driver copies the data,
2377  * if any, along with the reply and sense information to the user
2378  * provided buffers. If the command times out, a controller reset
2379  * is issued prior to returning.
2380  *
2381  * Return: 0 on success and proper error codes on failure
2382  */
2384 static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
2385 {
2386 	long rval = -EINVAL;
2387 	struct mpi3mr_ioc *mrioc = NULL;
2388 	u8 *mpi_req = NULL, *sense_buff_k = NULL;
2389 	u8 mpi_msg_size = 0;
2390 	struct mpi3mr_bsg_packet *bsg_req = NULL;
2391 	struct mpi3mr_bsg_mptcmd *karg;
2392 	struct mpi3mr_buf_entry *buf_entries = NULL;
2393 	struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL;
2394 	u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0;
2395 	u8 din_cnt = 0, dout_cnt = 0;
2396 	u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF;
2397 	u8 block_io = 0, nvme_fmt = 0, resp_code = 0;
2398 	struct mpi3_request_header *mpi_header = NULL;
2399 	struct mpi3_status_reply_descriptor *status_desc;
2400 	struct mpi3_scsi_task_mgmt_request *tm_req;
2401 	u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen;
2402 	u16 dev_handle;
2403 	struct mpi3mr_tgt_dev *tgtdev;
2404 	struct mpi3mr_stgt_priv_data *stgt_priv = NULL;
2405 	struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL;
2406 	u32 din_size = 0, dout_size = 0;
2407 	u8 *din_buf = NULL, *dout_buf = NULL;
2408 	u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL;
2409 	u16 rmc_size  = 0, desc_count = 0;
2410 
2411 	bsg_req = job->request;
2412 	karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd;
2413 
2414 	mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id);
2415 	if (!mrioc)
2416 		return -ENODEV;
2417 
2418 	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
2419 		return -ERESTARTSYS;
2420 
2421 	if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) {
2422 		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
2423 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2424 		return -EAGAIN;
2425 	}
2426 
2427 	if (!mrioc->ioctl_sges_allocated) {
2428 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2429 		dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n",
2430 			       __func__);
2431 		return -ENOMEM;
2432 	}
2433 
2434 	if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT)
2435 		karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;
2436 
2437 	mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL);
2438 	if (!mpi_req) {
2439 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2440 		return -ENOMEM;
2441 	}
2442 	mpi_header = (struct mpi3_request_header *)mpi_req;
2443 
2444 	bufcnt = karg->buf_entry_list.num_of_entries;
2445 	drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL);
2446 	if (!drv_bufs) {
2447 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2448 		rval = -ENOMEM;
2449 		goto out;
2450 	}
2451 
2452 	dout_buf = kzalloc(job->request_payload.payload_len,
2453 				      GFP_KERNEL);
2454 	if (!dout_buf) {
2455 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2456 		rval = -ENOMEM;
2457 		goto out;
2458 	}
2459 
2460 	din_buf = kzalloc(job->reply_payload.payload_len,
2461 				     GFP_KERNEL);
2462 	if (!din_buf) {
2463 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2464 		rval = -ENOMEM;
2465 		goto out;
2466 	}
2467 
2468 	sg_copy_to_buffer(job->request_payload.sg_list,
2469 			  job->request_payload.sg_cnt,
2470 			  dout_buf, job->request_payload.payload_len);
2471 
2472 	buf_entries = karg->buf_entry_list.buf_entry;
2473 	sgl_din_iter = din_buf;
2474 	sgl_dout_iter = dout_buf;
2475 	drv_buf_iter = drv_bufs;
2476 
2477 	for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) {
2478 
2479 		switch (buf_entries->buf_type) {
2480 		case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD:
2481 			sgl_iter = sgl_dout_iter;
2482 			sgl_dout_iter += buf_entries->buf_len;
2483 			drv_buf_iter->data_dir = DMA_TO_DEVICE;
2484 			is_rmcb = 1;
2485 			if ((count != 0) || !buf_entries->buf_len)
2486 				invalid_be = 1;
2487 			break;
2488 		case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP:
2489 			sgl_iter = sgl_din_iter;
2490 			sgl_din_iter += buf_entries->buf_len;
2491 			drv_buf_iter->data_dir = DMA_FROM_DEVICE;
2492 			is_rmrb = 1;
2493 			if (count != 1 || !is_rmcb || !buf_entries->buf_len)
2494 				invalid_be = 1;
2495 			break;
2496 		case MPI3MR_BSG_BUFTYPE_DATA_IN:
2497 			sgl_iter = sgl_din_iter;
2498 			sgl_din_iter += buf_entries->buf_len;
2499 			drv_buf_iter->data_dir = DMA_FROM_DEVICE;
2500 			din_cnt++;
2501 			din_size += buf_entries->buf_len;
2502 			if ((din_cnt > 1) && !is_rmcb)
2503 				invalid_be = 1;
2504 			break;
2505 		case MPI3MR_BSG_BUFTYPE_DATA_OUT:
2506 			sgl_iter = sgl_dout_iter;
2507 			sgl_dout_iter += buf_entries->buf_len;
2508 			drv_buf_iter->data_dir = DMA_TO_DEVICE;
2509 			dout_cnt++;
2510 			dout_size += buf_entries->buf_len;
2511 			if ((dout_cnt > 1) && !is_rmcb)
2512 				invalid_be = 1;
2513 			break;
2514 		case MPI3MR_BSG_BUFTYPE_MPI_REPLY:
2515 			sgl_iter = sgl_din_iter;
2516 			sgl_din_iter += buf_entries->buf_len;
2517 			drv_buf_iter->data_dir = DMA_NONE;
2518 			mpirep_offset = count;
2519 			if (!buf_entries->buf_len)
2520 				invalid_be = 1;
2521 			break;
2522 		case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE:
2523 			sgl_iter = sgl_din_iter;
2524 			sgl_din_iter += buf_entries->buf_len;
2525 			drv_buf_iter->data_dir = DMA_NONE;
2526 			erb_offset = count;
2527 			if (!buf_entries->buf_len)
2528 				invalid_be = 1;
2529 			break;
2530 		case MPI3MR_BSG_BUFTYPE_MPI_REQUEST:
2531 			sgl_iter = sgl_dout_iter;
2532 			sgl_dout_iter += buf_entries->buf_len;
2533 			drv_buf_iter->data_dir = DMA_NONE;
2534 			mpi_msg_size = buf_entries->buf_len;
2535 			if ((!mpi_msg_size || (mpi_msg_size % 4)) ||
2536 			    (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) {
2537 				dprint_bsg_err(mrioc, "%s: invalid MPI message size\n",
2538 					__func__);
2539 				mutex_unlock(&mrioc->bsg_cmds.mutex);
2540 				rval = -EINVAL;
2541 				goto out;
2542 			}
2543 			memcpy(mpi_req, sgl_iter, buf_entries->buf_len);
2544 			break;
2545 		default:
2546 			invalid_be = 1;
2547 			break;
2548 		}
2549 		if (invalid_be) {
2550 			dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n",
2551 				__func__);
2552 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2553 			rval = -EINVAL;
2554 			goto out;
2555 		}
2556 
2557 		if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
2558 			dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
2559 				       __func__);
2560 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2561 			rval = -EINVAL;
2562 			goto out;
2563 		}
2564 		if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
2565 			dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
2566 				       __func__);
2567 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2568 			rval = -EINVAL;
2569 			goto out;
2570 		}
2571 
2572 		drv_buf_iter->bsg_buf = sgl_iter;
2573 		drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
2574 	}
2575 
2576 	if (is_rmcb && ((din_size + dout_size) > MPI3MR_MAX_APP_XFER_SIZE)) {
2577 		dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size = %d, dout_size = %d\n",
2578 			       __func__, __LINE__, mpi_header->function, din_size,
2579 			       dout_size);
2580 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2581 		rval = -EINVAL;
2582 		goto out;
2583 	}
2584 
2585 	if (din_size > MPI3MR_MAX_APP_XFER_SIZE) {
2586 		dprint_bsg_err(mrioc,
2587 		    "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
2588 		    __func__, __LINE__, mpi_header->function, din_size);
2589 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2590 		rval = -EINVAL;
2591 		goto out;
2592 	}
2593 	if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) {
2594 		dprint_bsg_err(mrioc,
2595 		    "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n",
2596 		    __func__, __LINE__, mpi_header->function, dout_size);
2597 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2598 		rval = -EINVAL;
2599 		goto out;
2600 	}
2601 
2602 	if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
2603 		if (din_size > MPI3MR_IOCTL_SGE_SIZE ||
2604 		    dout_size > MPI3MR_IOCTL_SGE_SIZE) {
2605 			dprint_bsg_err(mrioc, "%s:%d: invalid message size passed:%d:%d:%d:%d\n",
2606 				       __func__, __LINE__, din_cnt, dout_cnt, din_size,
2607 			    dout_size);
2608 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2609 			rval = -EINVAL;
2610 			goto out;
2611 		}
2612 	}
2613 
2614 	drv_buf_iter = drv_bufs;
2615 	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
2616 		if (drv_buf_iter->data_dir == DMA_NONE)
2617 			continue;
2618 
2619 		drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len;
2620 		if (is_rmcb && !count) {
2621 			drv_buf_iter->kern_buf_len =
2622 			    mrioc->ioctl_chain_sge.size;
2623 			drv_buf_iter->kern_buf =
2624 			    mrioc->ioctl_chain_sge.addr;
2625 			drv_buf_iter->kern_buf_dma =
2626 			    mrioc->ioctl_chain_sge.dma_addr;
2627 			drv_buf_iter->dma_desc = NULL;
2628 			drv_buf_iter->num_dma_desc = 0;
2629 			memset(drv_buf_iter->kern_buf, 0,
2630 			       drv_buf_iter->kern_buf_len);
2631 			tmplen = min(drv_buf_iter->kern_buf_len,
2632 				     drv_buf_iter->bsg_buf_len);
2633 			rmc_size = tmplen;
2634 			memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
2635 		} else if (is_rmrb && (count == 1)) {
2636 			drv_buf_iter->kern_buf_len =
2637 			    mrioc->ioctl_resp_sge.size;
2638 			drv_buf_iter->kern_buf =
2639 			    mrioc->ioctl_resp_sge.addr;
2640 			drv_buf_iter->kern_buf_dma =
2641 			    mrioc->ioctl_resp_sge.dma_addr;
2642 			drv_buf_iter->dma_desc = NULL;
2643 			drv_buf_iter->num_dma_desc = 0;
2644 			memset(drv_buf_iter->kern_buf, 0,
2645 			       drv_buf_iter->kern_buf_len);
2646 			tmplen = min(drv_buf_iter->kern_buf_len,
2647 				     drv_buf_iter->bsg_buf_len);
2648 			drv_buf_iter->kern_buf_len = tmplen;
2649 			memset(drv_buf_iter->bsg_buf, 0,
2650 			       drv_buf_iter->bsg_buf_len);
2651 		} else {
2652 			if (!drv_buf_iter->kern_buf_len)
2653 				continue;
2654 			if (mpi3mr_map_data_buffer_dma(mrioc, drv_buf_iter, desc_count)) {
2655 				rval = -ENOMEM;
2656 				mutex_unlock(&mrioc->bsg_cmds.mutex);
2657 				dprint_bsg_err(mrioc, "%s:%d: mapping data buffers failed\n",
2658 					       __func__, __LINE__);
2659 				goto out;
2660 			}
2661 			desc_count += drv_buf_iter->num_dma_desc;
2662 		}
2663 	}
2664 
2665 	if (erb_offset != 0xFF) {
2666 		sense_buff_k = kzalloc(erbsz, GFP_KERNEL);
2667 		if (!sense_buff_k) {
2668 			rval = -ENOMEM;
2669 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2670 			goto out;
2671 		}
2672 	}
2673 
2674 	if (mrioc->unrecoverable) {
2675 		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
2676 		    __func__);
2677 		rval = -EFAULT;
2678 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2679 		goto out;
2680 	}
2681 	if (mrioc->reset_in_progress) {
2682 		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
2683 		rval = -EAGAIN;
2684 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2685 		goto out;
2686 	}
2687 	if (mrioc->stop_bsgs || mrioc->block_on_pci_err) {
2688 		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
2689 		rval = -EAGAIN;
2690 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2691 		goto out;
2692 	}
2693 
2694 	if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) {
2695 		nvme_fmt = mpi3mr_get_nvme_data_fmt(
2696 			(struct mpi3_nvme_encapsulated_request *)mpi_req);
2697 		if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) {
2698 			if (mpi3mr_build_nvme_prp(mrioc,
2699 			    (struct mpi3_nvme_encapsulated_request *)mpi_req,
2700 			    drv_bufs, bufcnt)) {
2701 				rval = -ENOMEM;
2702 				mutex_unlock(&mrioc->bsg_cmds.mutex);
2703 				goto out;
2704 			}
2705 		} else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 ||
2706 			nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) {
2707 			if (mpi3mr_build_nvme_sgl(mrioc,
2708 			    (struct mpi3_nvme_encapsulated_request *)mpi_req,
2709 			    drv_bufs, bufcnt)) {
2710 				rval = -EINVAL;
2711 				mutex_unlock(&mrioc->bsg_cmds.mutex);
2712 				goto out;
2713 			}
2714 		} else {
2715 			dprint_bsg_err(mrioc,
2716 			    "%s:invalid NVMe command format\n", __func__);
2717 			rval = -EINVAL;
2718 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2719 			goto out;
2720 		}
2721 	} else {
2722 		if (mpi3mr_bsg_build_sgl(mrioc, mpi_req, mpi_msg_size,
2723 					 drv_bufs, bufcnt, is_rmcb, is_rmrb,
2724 					 (dout_cnt + din_cnt))) {
2725 			dprint_bsg_err(mrioc, "%s: sgl build failed\n", __func__);
2726 			rval = -EAGAIN;
2727 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2728 			goto out;
2729 		}
2730 	}
2731 
2732 	if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) {
2733 		tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req;
2734 		if (tm_req->task_type !=
2735 		    MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
2736 			dev_handle = tm_req->dev_handle;
2737 			block_io = 1;
2738 		}
2739 	}
2740 	if (block_io) {
2741 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
2742 		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
2743 			stgt_priv = (struct mpi3mr_stgt_priv_data *)
2744 			    tgtdev->starget->hostdata;
2745 			atomic_inc(&stgt_priv->block_io);
2746 			mpi3mr_tgtdev_put(tgtdev);
2747 		}
2748 	}
2749 
2750 	mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING;
2751 	mrioc->bsg_cmds.is_waiting = 1;
2752 	mrioc->bsg_cmds.callback = NULL;
2753 	mrioc->bsg_cmds.is_sense = 0;
2754 	mrioc->bsg_cmds.sensebuf = sense_buff_k;
2755 	memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz);
2756 	mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS);
2757 	if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) {
2758 		dprint_bsg_info(mrioc,
2759 		    "%s: posting bsg request to the controller\n", __func__);
2760 		dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
2761 		    "bsg_mpi3_req");
2762 		if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
2763 			drv_buf_iter = &drv_bufs[0];
2764 			dprint_dump(drv_buf_iter->kern_buf,
2765 			    rmc_size, "mpi3_mgmt_req");
2766 		}
2767 	}
2768 
2769 	init_completion(&mrioc->bsg_cmds.done);
2770 	rval = mpi3mr_admin_request_post(mrioc, mpi_req,
2771 	    MPI3MR_ADMIN_REQ_FRAME_SZ, 0);
2772 
2774 	if (rval) {
2775 		mrioc->bsg_cmds.is_waiting = 0;
2776 		dprint_bsg_err(mrioc,
2777 		    "%s: posting bsg request failed\n", __func__);
2778 		rval = -EAGAIN;
2779 		goto out_unlock;
2780 	}
2781 	wait_for_completion_timeout(&mrioc->bsg_cmds.done,
2782 	    (karg->timeout * HZ));
2783 	if (block_io && stgt_priv)
2784 		atomic_dec(&stgt_priv->block_io);
2785 	if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) {
2786 		mrioc->bsg_cmds.is_waiting = 0;
2787 		rval = -EAGAIN;
2788 		if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)
2789 			goto out_unlock;
2790 		if (((mpi_header->function != MPI3_FUNCTION_SCSI_IO) &&
2791 		    (mpi_header->function != MPI3_FUNCTION_NVME_ENCAPSULATED))
2792 		    || (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR)) {
2793 			ioc_info(mrioc, "%s: bsg request timed out after %d seconds\n",
2794 			    __func__, karg->timeout);
2795 			if (!(mrioc->logging_level & MPI3_DEBUG_BSG_INFO)) {
2796 				dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
2797 					    "bsg_mpi3_req");
2798 				if (mpi_header->function ==
2799 				    MPI3_FUNCTION_MGMT_PASSTHROUGH) {
2800 					drv_buf_iter = &drv_bufs[0];
2801 					dprint_dump(drv_buf_iter->kern_buf,
2802 						    rmc_size, "mpi3_mgmt_req");
2803 				}
2804 			}
2805 		}
2806 		if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
2807 			(mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO)) {
2808 			dprint_bsg_err(mrioc, "%s: bsg request timed out after %d seconds,\n"
2809 				"issuing target reset to (0x%04x)\n", __func__,
2810 				karg->timeout, mpi_header->function_dependent);
2811 			mpi3mr_issue_tm(mrioc,
2812 			    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
2813 			    mpi_header->function_dependent, 0,
2814 			    MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
2815 			    &mrioc->host_tm_cmds, &resp_code, NULL);
2816 		}
2817 		if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) &&
2818 		    !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET))
2819 			mpi3mr_soft_reset_handler(mrioc,
2820 			    MPI3MR_RESET_FROM_APP_TIMEOUT, 1);
2821 		goto out_unlock;
2822 	}
2823 	dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__);
2824 
2825 	if (mrioc->prp_list_virt) {
2826 		dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
2827 		    mrioc->prp_list_virt, mrioc->prp_list_dma);
2828 		mrioc->prp_list_virt = NULL;
2829 	}
2830 
2831 	if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2832 	     != MPI3_IOCSTATUS_SUCCESS) {
2833 		dprint_bsg_info(mrioc,
2834 		    "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
2835 		    __func__,
2836 		    (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2837 		    mrioc->bsg_cmds.ioc_loginfo);
2838 	}
2839 
2840 	if ((mpirep_offset != 0xFF) &&
2841 	    drv_bufs[mpirep_offset].bsg_buf_len) {
2842 		drv_buf_iter = &drv_bufs[mpirep_offset];
2843 		drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) +
2844 					   mrioc->reply_sz);
2845 		bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);
2846 
2847 		if (!bsg_reply_buf) {
2848 			rval = -ENOMEM;
2849 			goto out_unlock;
2850 		}
2851 		if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) {
2852 			bsg_reply_buf->mpi_reply_type =
2853 				MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS;
2854 			memcpy(bsg_reply_buf->reply_buf,
2855 			    mrioc->bsg_cmds.reply, mrioc->reply_sz);
2856 		} else {
2857 			bsg_reply_buf->mpi_reply_type =
2858 				MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS;
2859 			status_desc = (struct mpi3_status_reply_descriptor *)
2860 			    bsg_reply_buf->reply_buf;
2861 			status_desc->ioc_status = mrioc->bsg_cmds.ioc_status;
2862 			status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo;
2863 		}
2864 		tmplen = min(drv_buf_iter->kern_buf_len,
2865 			drv_buf_iter->bsg_buf_len);
2866 		memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen);
2867 	}
2868 
2869 	if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf &&
2870 	    mrioc->bsg_cmds.is_sense) {
2871 		drv_buf_iter = &drv_bufs[erb_offset];
2872 		tmplen = min(erbsz, drv_buf_iter->bsg_buf_len);
2873 		memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen);
2874 	}
2875 
2876 	drv_buf_iter = drv_bufs;
2877 	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
2878 		if (drv_buf_iter->data_dir == DMA_NONE)
2879 			continue;
2880 		if ((count == 1) && is_rmrb) {
2881 			memcpy(drv_buf_iter->bsg_buf,
2882 			    drv_buf_iter->kern_buf,
2883 			    drv_buf_iter->kern_buf_len);
2884 		} else if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
2885 			tmplen = 0;
2886 			for (desc_count = 0;
2887 			    desc_count < drv_buf_iter->num_dma_desc;
2888 			    desc_count++) {
2889 				memcpy(((u8 *)drv_buf_iter->bsg_buf + tmplen),
2890 				       drv_buf_iter->dma_desc[desc_count].addr,
2891 				       drv_buf_iter->dma_desc[desc_count].size);
2892 				tmplen +=
2893 				    drv_buf_iter->dma_desc[desc_count].size;
2894 			}
2895 		}
2896 	}
2897 
2898 out_unlock:
2899 	if (din_buf) {
2900 		job->reply_payload_rcv_len =
2901 			sg_copy_from_buffer(job->reply_payload.sg_list,
2902 					    job->reply_payload.sg_cnt,
2903 					    din_buf, job->reply_payload.payload_len);
2904 	}
2905 	mrioc->bsg_cmds.is_sense = 0;
2906 	mrioc->bsg_cmds.sensebuf = NULL;
2907 	mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED;
2908 	mutex_unlock(&mrioc->bsg_cmds.mutex);
2909 out:
2910 	kfree(sense_buff_k);
2911 	kfree(dout_buf);
2912 	kfree(din_buf);
2913 	kfree(mpi_req);
2914 	if (drv_bufs) {
2915 		drv_buf_iter = drv_bufs;
2916 		for (count = 0; count < bufcnt; count++, drv_buf_iter++)
2917 			kfree(drv_buf_iter->dma_desc);
2918 		kfree(drv_bufs);
2919 	}
2920 	kfree(bsg_reply_buf);
2921 	return rval;
2922 }
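
/*
 * Timeout handling above, summarized: on timeout the handler first
 * issues a target reset for SCSI IO and NVMe encapsulated functions
 * and, if the command still has not completed (and no controller reset
 * already ran), escalates to a soft reset with
 * MPI3MR_RESET_FROM_APP_TIMEOUT.
 */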
2923 
2924 /**
2925  * mpi3mr_app_save_logdata - Save Log Data events
2926  * @mrioc: Adapter instance reference
2927  * @event_data: event data associated with log data event
2928  * @event_data_size: event data size to copy
2929  *
2930  * If log data event caching is enabled by the applications,
2931  * then this function saves the log data in the circular queue
2932  * and sends the async signal SIGIO to indicate to the event
2933  * monitoring applications that an async firmware event arrived.
2934  *
2935  * Return: Nothing
2936  */
2937 void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
2938 	u16 event_data_size)
2939 {
2940 	u32 index = mrioc->logdata_buf_idx, sz;
2941 	struct mpi3mr_logdata_entry *entry;
2942 
2943 	if (!(mrioc->logdata_buf))
2944 		return;
2945 
2946 	entry = (struct mpi3mr_logdata_entry *)
2947 		(mrioc->logdata_buf + (index * mrioc->logdata_entry_sz));
2948 	entry->valid_entry = 1;
2949 	sz = min(mrioc->logdata_entry_sz, event_data_size);
2950 	memcpy(entry->data, event_data, sz);
2951 	mrioc->logdata_buf_idx =
2952 		((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES);
2953 	atomic64_inc(&event_counter);
2954 }
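
/*
 * Ring index arithmetic above, by example: with
 * MPI3MR_BSG_LOGDATA_MAX_ENTRIES of, say, 128 and logdata_buf_idx of
 * 127, the increment-modulo wraps the next write index back to 0, so
 * the oldest cached entry is overwritten first (a classic circular
 * buffer).
 */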
2955 
2956 /**
2957  * mpi3mr_bsg_request - bsg request entry point
2958  * @job: BSG job reference
2959  *
2960  * This is the driver's entry point for bsg requests.
2961  *
2962  * Return: 0 on success and proper error codes on failure
2963  */
2964 static int mpi3mr_bsg_request(struct bsg_job *job)
2965 {
2966 	long rval = -EINVAL;
2967 	unsigned int reply_payload_rcv_len = 0;
2968 
2969 	struct mpi3mr_bsg_packet *bsg_req = job->request;
2970 
2971 	switch (bsg_req->cmd_type) {
2972 	case MPI3MR_DRV_CMD:
2973 		rval = mpi3mr_bsg_process_drv_cmds(job);
2974 		break;
2975 	case MPI3MR_MPT_CMD:
2976 		rval = mpi3mr_bsg_process_mpt_cmds(job);
2977 		break;
2978 	default:
2979 		pr_err("%s: unsupported BSG command(0x%08x)\n",
2980 		    MPI3MR_DRIVER_NAME, bsg_req->cmd_type);
2981 		break;
2982 	}
2983 
2984 	bsg_job_done(job, rval, reply_payload_rcv_len);
2985 
2986 	return 0;
2987 }
2988 
2989 /**
2990  * mpi3mr_bsg_exit - de-registration from bsg layer
2991  * @mrioc: Adapter instance reference
2992  *
2993  * This will be called during driver unload and all
2994  * bsg resources allocated during load will be freed.
2995  *
2996  * Return: Nothing
2997  */
2998 void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
2999 {
3000 	struct device *bsg_dev = &mrioc->bsg_dev;
3001 	if (!mrioc->bsg_queue)
3002 		return;
3003 
3004 	bsg_remove_queue(mrioc->bsg_queue);
3005 	mrioc->bsg_queue = NULL;
3006 
3007 	device_del(bsg_dev);
3008 	put_device(bsg_dev);
3009 }
3010 
3011 /**
3012  * mpi3mr_bsg_node_release - release bsg device node
3013  * @dev: bsg device node
3014  *
3015  * Decrements the bsg device parent reference count.
3016  *
3017  * Return: Nothing
3018  */
3019 static void mpi3mr_bsg_node_release(struct device *dev)
3020 {
3021 	put_device(dev->parent);
3022 }
3023 
3024 /**
3025  * mpi3mr_bsg_init - registration with bsg layer
3026  * @mrioc: Adapter instance reference
3027  *
3028  * This will be called during driver load and it will
3029  * register the driver with the bsg layer.
3030  *
3031  * Return: Nothing
3032  */
3033 void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
3034 {
3035 	struct device *bsg_dev = &mrioc->bsg_dev;
3036 	struct device *parent = &mrioc->shost->shost_gendev;
3037 	struct queue_limits lim = {
3038 		.max_hw_sectors		= MPI3MR_MAX_APP_XFER_SECTORS,
3039 		.max_segments		= MPI3MR_MAX_APP_XFER_SEGMENTS,
3040 	};
3041 	struct request_queue *q;
3042 
3043 	device_initialize(bsg_dev);
3044 
3045 	bsg_dev->parent = get_device(parent);
3046 	bsg_dev->release = mpi3mr_bsg_node_release;
3047 
3048 	dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);
3049 
3050 	if (device_add(bsg_dev)) {
3051 		ioc_err(mrioc, "%s: bsg device add failed\n",
3052 		    dev_name(bsg_dev));
3053 		put_device(bsg_dev);
3054 		return;
3055 	}
3056 
3057 	q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
3058 			mpi3mr_bsg_request, NULL, 0);
3059 	if (IS_ERR(q)) {
3060 		ioc_err(mrioc, "%s: bsg registration failed\n",
3061 		    dev_name(bsg_dev));
3062 		device_del(bsg_dev);
3063 		put_device(bsg_dev);
3064 		return;
3065 	}
3066 
3067 	mrioc->bsg_queue = q;
3068 }
3069 
3070 /**
3071  * version_fw_show - SysFS callback for firmware version read
3072  * @dev: class device
3073  * @attr: Device attributes
3074  * @buf: Buffer to copy
3075  *
3076  * Return: sysfs_emit() return after copying firmware version
3077  */
3078 static ssize_t
3079 version_fw_show(struct device *dev, struct device_attribute *attr,
3080 	char *buf)
3081 {
3082 	struct Scsi_Host *shost = class_to_shost(dev);
3083 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3084 	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
3085 
3086 	return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n",
3087 	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
3088 	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
3089 }
3090 static DEVICE_ATTR_RO(version_fw);
3091 
3092 /**
3093  * fw_queue_depth_show - SysFS callback for firmware max cmds
3094  * @dev: class device
3095  * @attr: Device attributes
3096  * @buf: Buffer to copy
3097  *
3098  * Return: sysfs_emit() return after copying firmware max commands
3099  */
3100 static ssize_t
3101 fw_queue_depth_show(struct device *dev, struct device_attribute *attr,
3102 			char *buf)
3103 {
3104 	struct Scsi_Host *shost = class_to_shost(dev);
3105 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3106 
3107 	return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs);
3108 }
3109 static DEVICE_ATTR_RO(fw_queue_depth);
3110 
3111 /**
3112  * op_req_q_count_show - SysFS callback for request queue count
3113  * @dev: class device
3114  * @attr: Device attributes
3115  * @buf: Buffer to copy
3116  *
3117  * Return: sysfs_emit() return after copying request queue count
3118  */
3119 static ssize_t
3120 op_req_q_count_show(struct device *dev, struct device_attribute *attr,
3121 			char *buf)
3122 {
3123 	struct Scsi_Host *shost = class_to_shost(dev);
3124 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3125 
3126 	return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q);
3127 }
3128 static DEVICE_ATTR_RO(op_req_q_count);
3129 
3130 /**
3131  * reply_queue_count_show - SysFS callback for reply queue count
3132  * @dev: class device
3133  * @attr: Device attributes
3134  * @buf: Buffer to copy
3135  *
3136  * Return: sysfs_emit() return after copying reply queue count
3137  */
3138 static ssize_t
3139 reply_queue_count_show(struct device *dev, struct device_attribute *attr,
3140 			char *buf)
3141 {
3142 	struct Scsi_Host *shost = class_to_shost(dev);
3143 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3144 
3145 	return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q);
3146 }
3147 
3148 static DEVICE_ATTR_RO(reply_queue_count);
3149 
3150 /**
3151  * reply_qfull_count_show - Show reply qfull count
3152  * @dev: class device
3153  * @attr: Device attributes
3154  * @buf: Buffer to copy
3155  *
3156  * Retrieves the current value of the reply_qfull_count from the mrioc structure and
3157  * formats it as a string for display.
3158  *
3159  * Return: sysfs_emit() return
3160  */
3161 static ssize_t
3162 reply_qfull_count_show(struct device *dev, struct device_attribute *attr,
3163 			char *buf)
3164 {
3165 	struct Scsi_Host *shost = class_to_shost(dev);
3166 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3167 
3168 	return sysfs_emit(buf, "%u\n", atomic_read(&mrioc->reply_qfull_count));
3169 }
3170 
3171 static DEVICE_ATTR_RO(reply_qfull_count);
3172 
3173 /**
3174  * logging_level_show - Show controller debug level
3175  * @dev: class device
3176  * @attr: Device attributes
3177  * @buf: Buffer to copy
3178  *
3179  * A sysfs 'read/write' shost attribute, to show the current
3180  * debug log level used by the driver for the specific
3181  * controller.
3182  *
3183  * Return: sysfs_emit() return
3184  */
3185 static ssize_t
3186 logging_level_show(struct device *dev,
3187 	struct device_attribute *attr, char *buf)
3188 
3189 {
3190 	struct Scsi_Host *shost = class_to_shost(dev);
3191 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3192 
3193 	return sysfs_emit(buf, "%08xh\n", mrioc->logging_level);
3194 }
3195 
3196 /**
3197  * logging_level_store - Change controller debug level
3198  * @dev: class device
3199  * @attr: Device attributes
3200  * @buf: Buffer to copy
3201  * @count: size of the buffer
3202  *
3203  * A sysfs 'read/write' shost attribute, to change the current
3204  * debug log level used by the driver for the specific
3205  * controller.
3206  *
3207  * Return: strlen() return
3208  */
3209 static ssize_t
3210 logging_level_store(struct device *dev,
3211 	struct device_attribute *attr,
3212 	const char *buf, size_t count)
3213 {
3214 	struct Scsi_Host *shost = class_to_shost(dev);
3215 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3216 	int val = 0;
3217 
3218 	if (kstrtoint(buf, 0, &val) != 0)
3219 		return -EINVAL;
3220 
3221 	mrioc->logging_level = val;
3222 	ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level);
3223 	return strlen(buf);
3224 }
3225 static DEVICE_ATTR_RW(logging_level);
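/*
 * Illustrative usage (hypothetical host number): the store callback
 * parses the value with kstrtoint(buf, 0, &val), so decimal, octal
 * ("0" prefix) and hex ("0x" prefix) input are all accepted, and the
 * show callback prints the level back in the "%08xh" format:
 *
 *   $ echo 0x10 > /sys/class/scsi_host/host0/logging_level
 *   $ cat /sys/class/scsi_host/host0/logging_level
 *   00000010h
 */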
3226 
3227 /**
3228  * adp_state_show() - SysFS callback for adapter state display
3229  * @dev: class device
3230  * @attr: Device attributes
3231  * @buf: Buffer to copy
3232  *
3233  * Return: sysfs_emit() return after copying adapter state
3234  */
3235 static ssize_t
3236 adp_state_show(struct device *dev, struct device_attribute *attr,
3237 	char *buf)
3238 {
3239 	struct Scsi_Host *shost = class_to_shost(dev);
3240 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3241 	enum mpi3mr_iocstate ioc_state;
3242 	uint8_t adp_state;
3243 
3244 	ioc_state = mpi3mr_get_iocstate(mrioc);
3245 	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
3246 		adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
3247 	else if (mrioc->reset_in_progress || mrioc->stop_bsgs ||
3248 		 mrioc->block_on_pci_err)
3249 		adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
3250 	else if (ioc_state == MRIOC_STATE_FAULT)
3251 		adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
3252 	else
3253 		adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
3254 
3255 	return sysfs_emit(buf, "%u\n", adp_state);
3256 }
3257 
3258 static DEVICE_ATTR_RO(adp_state);
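/*
 * Illustrative read (hypothetical host number and value): the number
 * emitted is one of the MPI3MR_BSG_ADPSTATE_* values from
 * uapi/scsi/scsi_bsg_mpi3mr.h, so user space can reuse the decoding it
 * already has for the BSG interface:
 *
 *   $ cat /sys/class/scsi_host/host0/adp_state
 *   1
 */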
3259 
3260 static struct attribute *mpi3mr_host_attrs[] = {
3261 	&dev_attr_version_fw.attr,
3262 	&dev_attr_fw_queue_depth.attr,
3263 	&dev_attr_op_req_q_count.attr,
3264 	&dev_attr_reply_queue_count.attr,
3265 	&dev_attr_reply_qfull_count.attr,
3266 	&dev_attr_logging_level.attr,
3267 	&dev_attr_adp_state.attr,
3268 	NULL,
3269 };
3270 
3271 static const struct attribute_group mpi3mr_host_attr_group = {
3272 	.attrs = mpi3mr_host_attrs
3273 };
3274 
3275 const struct attribute_group *mpi3mr_host_groups[] = {
3276 	&mpi3mr_host_attr_group,
3277 	NULL,
3278 };
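/*
 * Minimal sketch (assumed wiring, following common SCSI LLD practice;
 * the driver's real scsi_host_template lives elsewhere in the driver):
 * the NULL-terminated group list above is plugged into the host
 * template, and the SCSI midlayer creates the sysfs files when the
 * Scsi_Host is added.
 */
#if 0	/* illustrative sketch only, not part of this file */
static struct scsi_host_template mpi3mr_template_sketch = {
	.module		= THIS_MODULE,
	.name		= "MPI3 Storage Controller",
	.shost_groups	= mpi3mr_host_groups,	/* host attrs above */
	.sdev_groups	= mpi3mr_dev_groups,	/* device attrs below */
	/* ... remaining template fields ... */
};
#endif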
3279 
3280 
3281 /*
3282  * SCSI Device attributes under sysfs
3283  */
3284 
3285 /**
3286  * sas_address_show - SysFS callback for dev SAS address display
3287  * @dev: class device
3288  * @attr: Device attributes
3289  * @buf: Buffer to copy
3290  *
3291  * Return: sysfs_emit() return after copying SAS address of the
3292  * specific SAS/SATA end device.
3293  */
3294 static ssize_t
3295 sas_address_show(struct device *dev, struct device_attribute *attr,
3296 			char *buf)
3297 {
3298 	struct scsi_device *sdev = to_scsi_device(dev);
3299 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
3300 	struct mpi3mr_stgt_priv_data *tgt_priv_data;
3301 	struct mpi3mr_tgt_dev *tgtdev;
3302 
3303 	sdev_priv_data = sdev->hostdata;
3304 	if (!sdev_priv_data)
3305 		return 0;
3306 
3307 	tgt_priv_data = sdev_priv_data->tgt_priv_data;
3308 	if (!tgt_priv_data)
3309 		return 0;
3310 	tgtdev = tgt_priv_data->tgt_dev;
3311 	if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA)
3312 		return 0;
3313 	return sysfs_emit(buf, "0x%016llx\n",
3314 	    (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address);
3315 }
3316 
3317 static DEVICE_ATTR_RO(sas_address);
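/*
 * Illustrative read (hypothetical H:C:T:L and output): per-device
 * attributes appear under the scsi_device directory in sysfs, and this
 * one is only emitted for SAS/SATA end devices (the callback returns 0,
 * i.e. an empty file, for anything else):
 *
 *   $ cat /sys/class/scsi_device/1:0:0:0/device/sas_address
 *   0x500605b0000272d8
 */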
3318 
3319 /**
3320  * device_handle_show - SysFS callback for device handle display
3321  * @dev: class device
3322  * @attr: Device attributes
3323  * @buf: Buffer to copy
3324  *
3325  * Return: sysfs_emit() return after copying firmware internal
3326  * device handle of the specific device.
3327  */
3328 static ssize_t
3329 device_handle_show(struct device *dev, struct device_attribute *attr,
3330 			char *buf)
3331 {
3332 	struct scsi_device *sdev = to_scsi_device(dev);
3333 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
3334 	struct mpi3mr_stgt_priv_data *tgt_priv_data;
3335 	struct mpi3mr_tgt_dev *tgtdev;
3336 
3337 	sdev_priv_data = sdev->hostdata;
3338 	if (!sdev_priv_data)
3339 		return 0;
3340 
3341 	tgt_priv_data = sdev_priv_data->tgt_priv_data;
3342 	if (!tgt_priv_data)
3343 		return 0;
3344 	tgtdev = tgt_priv_data->tgt_dev;
3345 	if (!tgtdev)
3346 		return 0;
3347 	return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle);
3348 }
3349 
3350 static DEVICE_ATTR_RO(device_handle);
3351 
3352 /**
3353  * persistent_id_show - SysFS callback for persistent ID display
3354  * @dev: class device
3355  * @attr: Device attributes
3356  * @buf: Buffer to copy
3357  *
3358  * Return: sysfs_emit() return after copying the persistent ID of
3359  * the specific device.
3360  */
3361 static ssize_t
3362 persistent_id_show(struct device *dev, struct device_attribute *attr,
3363 			char *buf)
3364 {
3365 	struct scsi_device *sdev = to_scsi_device(dev);
3366 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
3367 	struct mpi3mr_stgt_priv_data *tgt_priv_data;
3368 	struct mpi3mr_tgt_dev *tgtdev;
3369 
3370 	sdev_priv_data = sdev->hostdata;
3371 	if (!sdev_priv_data)
3372 		return 0;
3373 
3374 	tgt_priv_data = sdev_priv_data->tgt_priv_data;
3375 	if (!tgt_priv_data)
3376 		return 0;
3377 	tgtdev = tgt_priv_data->tgt_dev;
3378 	if (!tgtdev)
3379 		return 0;
3380 	return sysfs_emit(buf, "%d\n", tgtdev->perst_id);
3381 }
3382 static DEVICE_ATTR_RO(persistent_id);
3383 
3384 /**
3385  * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
3386  * @dev: pointer to embedded device
3387  * @attr: sas_ncq_prio_supported attribute descriptor
3388  * @buf: the buffer returned
3389  *
3390  * A sysfs 'read-only' sdev attribute, only works with SATA devices
3391  */
3392 static ssize_t
3393 sas_ncq_prio_supported_show(struct device *dev,
3394 			    struct device_attribute *attr, char *buf)
3395 {
3396 	struct scsi_device *sdev = to_scsi_device(dev);
3397 
3398 	return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
3399 }
3400 static DEVICE_ATTR_RO(sas_ncq_prio_supported);
3401 
3402 /**
3403  * sas_ncq_prio_enable_show - show if prioritized I/O sending is enabled
3404  * @dev: pointer to embedded device
3405  * @attr: sas_ncq_prio_enable attribute descriptor
3406  * @buf: the buffer returned
3407  *
3408  * A sysfs 'read/write' sdev attribute, only works with SATA devices
3409  */
3410 static ssize_t
3411 sas_ncq_prio_enable_show(struct device *dev,
3412 				 struct device_attribute *attr, char *buf)
3413 {
3414 	struct scsi_device *sdev = to_scsi_device(dev);
3415 	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
3416 
3417 	if (!sdev_priv_data)
3418 		return 0;
3419 
3420 	return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable);
3421 }
3422 
3423 static ssize_t
3424 sas_ncq_prio_enable_store(struct device *dev,
3425 				  struct device_attribute *attr,
3426 				  const char *buf, size_t count)
3427 {
3428 	struct scsi_device *sdev = to_scsi_device(dev);
3429 	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
3430 	bool ncq_prio_enable = false;
3431 
3432 	if (kstrtobool(buf, &ncq_prio_enable))
3433 		return -EINVAL;
3434 
3435 	if (!sas_ata_ncq_prio_supported(sdev))
3436 		return -EINVAL;
3437 
3438 	sdev_priv_data->ncq_prio_enable = ncq_prio_enable;
3439 
3440 	return strlen(buf);
3441 }
3442 static DEVICE_ATTR_RW(sas_ncq_prio_enable);
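/*
 * Illustrative flow (hypothetical H:C:T:L): check NCQ priority support
 * first, then enable it; per the store callback above, writes to a
 * device without support fail with -EINVAL:
 *
 *   $ cat /sys/class/scsi_device/1:0:0:0/device/sas_ncq_prio_supported
 *   1
 *   $ echo 1 > /sys/class/scsi_device/1:0:0:0/device/sas_ncq_prio_enable
 */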
3443 
3444 static struct attribute *mpi3mr_dev_attrs[] = {
3445 	&dev_attr_sas_address.attr,
3446 	&dev_attr_device_handle.attr,
3447 	&dev_attr_persistent_id.attr,
3448 	&dev_attr_sas_ncq_prio_supported.attr,
3449 	&dev_attr_sas_ncq_prio_enable.attr,
3450 	NULL,
3451 };
3452 
3453 static const struct attribute_group mpi3mr_dev_attr_group = {
3454 	.attrs = mpi3mr_dev_attrs
3455 };
3456 
3457 const struct attribute_group *mpi3mr_dev_groups[] = {
3458 	&mpi3mr_dev_attr_group,
3459 	NULL,
3460 };
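/*
 * As with mpi3mr_host_groups above, this NULL-terminated list is hooked
 * into the driver's scsi_host_template (the .sdev_groups field, as in
 * the sketch following mpi3mr_host_groups), so the midlayer creates the
 * per-device files automatically for every scsi_device it exposes.
 */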
3461