xref: /freebsd/sys/dev/smartpqi/smartpqi_ioctl.c (revision 2a63c3be158216222d89a073dcbd6a72ee4aab5a)
1 /*-
2  * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 
27 /*
28  * Management interface for smartpqi driver
29  */
30 
31 #include "smartpqi_includes.h"
32 
/*
 * OS-abstraction wrapper: copy a kernel buffer out to user space.
 * softs and mode are unused on FreeBSD; they keep the signature
 * common with other platforms' copy helpers.
 */
int
os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
		void *src_buf, int size, int mode)
{
	int error;

	error = copyout(src_buf, dest_buf, size);

	return error;
}
42 
/*
 * OS-abstraction wrapper: copy a user-space buffer into the kernel.
 * softs and mode are unused on FreeBSD; they keep the signature
 * common with other platforms' copy helpers.
 */
int
os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
		void *src_buf, int size, int mode)
{
	int error;

	error = copyin(src_buf, dest_buf, size);

	return error;
}
52 
53 /*
54  * Device open function for ioctl entry
55  */
56 static int
57 smartpqi_open(struct cdev *cdev, int flags, int devtype,
58 		struct thread *td)
59 {
60 	return BSD_SUCCESS;
61 }
62 
63 /*
64  * Device close function for ioctl entry
65  */
66 static int
67 smartpqi_close(struct cdev *cdev, int flags, int devtype,
68 		struct thread *td)
69 {
70 	return BSD_SUCCESS;
71 }
72 
73 /*
74  * ioctl for getting driver info
75  */
76 static void
77 smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
78 {
79 	struct pqisrc_softstate *softs = cdev->si_drv1;
80 	pdriver_info driver_info = (pdriver_info)udata;
81 
82 	DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
83 
84 	driver_info->major_version = PQISRC_OS_VERSION;
85 	driver_info->minor_version = PQISRC_FEATURE_VERSION;
86 	driver_info->release_version = PQISRC_PATCH_VERSION;
87 	driver_info->build_revision = PQISRC_BUILD_VERSION;
88 	driver_info->max_targets = PQI_MAX_DEVICES - 1;
89 	driver_info->max_io = softs->max_io_for_scsi_ml;
90 	driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;
91 
92 	DBG_FUNC("OUT\n");
93 }
94 
95 /*
96  * ioctl for getting controller info
97  */
98 static void
99 smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
100 {
101 	struct pqisrc_softstate *softs = cdev->si_drv1;
102 	device_t dev = softs->os_specific.pqi_dev;
103 	pqi_pci_info_t *pci_info = (pqi_pci_info_t *)udata;
104 	uint32_t sub_vendor = 0;
105 	uint32_t sub_device = 0;
106 	uint32_t vendor = 0;
107 	uint32_t device = 0;
108 
109 	DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
110 
111 	pci_info->bus = pci_get_bus(dev);
112 	pci_info->dev_fn = pci_get_function(dev);
113 	pci_info->domain = pci_get_domain(dev);
114 	sub_vendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
115 	sub_device = pci_read_config(dev, PCIR_SUBDEV_0, 2);
116 	pci_info->board_id = ((sub_device << 16) & 0xffff0000) | sub_vendor;
117 	vendor = pci_get_vendor(dev);
118 	device =  pci_get_device(dev);
119 	pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor;
120 	DBG_FUNC("OUT\n");
121 }
122 
123 static inline int
124 pqi_status_to_bsd_ioctl_status(int pqi_status)
125 {
126 	if (PQI_STATUS_SUCCESS == pqi_status)
127 		return BSD_SUCCESS;
128 	else
129 		return EIO;
130 }
131 
132 /*
133  * ioctl entry point for user
134  */
135 static int
136 smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
137 		int flags, struct thread *td)
138 {
139 	int bsd_status, pqi_status;
140 	struct pqisrc_softstate *softs = cdev->si_drv1;
141 
142 	DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev);
143 
144 	if (!udata) {
145 		DBG_ERR("udata is null !!\n");
146 		return EINVAL;
147 	}
148 
149 	if (pqisrc_ctrl_offline(softs)){
150 		return ENOTTY;
151 	}
152 
153 	switch (cmd) {
154 		case CCISS_GETDRIVVER:
155 			smartpqi_get_driver_info_ioctl(udata, cdev);
156 			bsd_status = BSD_SUCCESS;
157 			break;
158 		case CCISS_GETPCIINFO:
159 			smartpqi_get_pci_info_ioctl(udata, cdev);
160 			bsd_status = BSD_SUCCESS;
161 			break;
162 		case SMARTPQI_PASS_THRU:
163 		case CCISS_PASSTHRU:
164 			pqi_status = pqisrc_passthru_ioctl(softs, udata, 0);
165 			bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status);
166 			break;
167 		case CCISS_REGNEWD:
168 			pqi_status = pqisrc_scan_devices(softs);
169 			bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status);
170 			break;
171 		default:
172 			DBG_WARN( "!IOCTL cmd 0x%lx not supported\n", cmd);
173 			bsd_status = ENOTTY;
174 			break;
175 	}
176 
177 	DBG_FUNC("OUT error = %d\n", bsd_status);
178 	return bsd_status;
179 }
180 
/* Character-device switch for the smartpqi management node. */
static struct cdevsw smartpqi_cdevsw =
{
	.d_version = D_VERSION,
	.d_open    = smartpqi_open,
	.d_close   = smartpqi_close,
	.d_ioctl   = smartpqi_ioctl,
	.d_name    = "smartpqi",
};
189 
190 /*
191  * Function to create device node for ioctl
192  */
193 int
194 create_char_dev(struct pqisrc_softstate *softs, int card_index)
195 {
196 	int error = BSD_SUCCESS;
197 
198 	DBG_FUNC("IN idx = %d\n", card_index);
199 
200 	softs->os_specific.cdev = make_dev(&smartpqi_cdevsw, card_index,
201 				UID_ROOT, GID_OPERATOR, 0640,
202 				"smartpqi%u", card_index);
203 	if(softs->os_specific.cdev) {
204 		softs->os_specific.cdev->si_drv1 = softs;
205 	} else {
206 		error = ENXIO;
207 	}
208 
209 	DBG_FUNC("OUT error = %d\n", error);
210 
211 	return error;
212 }
213 
214 /*
215  * Function to destroy device node for ioctl
216  */
217 void
218 destroy_char_dev(struct pqisrc_softstate *softs)
219 {
220 	DBG_FUNC("IN\n");
221 	if (softs->os_specific.cdev) {
222 		destroy_dev(softs->os_specific.cdev);
223 		softs->os_specific.cdev = NULL;
224 	}
225 	DBG_FUNC("OUT\n");
226 }
227 
/*
 * Function used to send passthru commands to adapter
 * to support management tools, e.g. ssacli, sscon.
 *
 * arg points at an IOCTL_Command_struct already copied into kernel
 * space by the ioctl layer.  The caller's data buffer (iocommand->buf)
 * is bounced through a driver-allocated DMA buffer: copied in before
 * submission for WRITE-type requests and copied back out after
 * completion for READ-type requests.  Returns PQI_STATUS_SUCCESS or
 * PQI_STATUS_FAILURE.
 */
int
pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
{
	int ret = PQI_STATUS_SUCCESS;
	char *drv_buf = NULL;
	uint32_t tag = 0;
	IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg;
	dma_mem_t ioctl_dma_buf;
	pqisrc_raid_req_t request;
	raid_path_error_info_elem_t error_info;
	/* Passthru always goes through the default RAID-path queue pair. */
	ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
	ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
	rcb_t *rcb = NULL;

	memset(&request, 0, sizeof(request));
	memset(&error_info, 0, sizeof(error_info));

	DBG_FUNC("IN");

	if (pqisrc_ctrl_offline(softs))
		return PQI_STATUS_FAILURE;

	if (!arg)
		return (PQI_STATUS_FAILURE);

	/*
	 * A zero-length buffer is only acceptable when no data transfer
	 * is requested; the CDB must also fit in the request's cdb field.
	 */
	if (iocommand->buf_size < 1 &&
		iocommand->Request.Type.Direction != PQIIOCTL_NONE)
		return PQI_STATUS_FAILURE;
	if (iocommand->Request.CDBLen > sizeof(request.cdb))
		return PQI_STATUS_FAILURE;

	/* Only the four defined transfer directions are accepted. */
	switch (iocommand->Request.Type.Direction) {
		case PQIIOCTL_NONE:
		case PQIIOCTL_WRITE:
		case PQIIOCTL_READ:
		case PQIIOCTL_BIDIRECTIONAL:
			break;
		default:
			return PQI_STATUS_FAILURE;
	}

	/* Set up a DMA bounce buffer when the caller supplied data. */
	if (iocommand->buf_size > 0) {
		memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem));
		ioctl_dma_buf.tag = "Ioctl_PassthruCmd_Buffer";
		ioctl_dma_buf.size = iocommand->buf_size;
		ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
		/* allocate memory */
		ret = os_dma_mem_alloc(softs, &ioctl_dma_buf);
		if (ret) {
			DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret);
			ret = PQI_STATUS_FAILURE;
			goto out;
		}

		DBG_INFO("ioctl_dma_buf.dma_addr  = %p\n",(void*)ioctl_dma_buf.dma_addr);
		DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);

		drv_buf = (char *)ioctl_dma_buf.virt_addr;
		/* For write-type requests, stage the user data first. */
		if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
			if ((ret = os_copy_from_user(softs, (void *)drv_buf,
					(void *)iocommand->buf,
					iocommand->buf_size, mode)) != 0) {
				ret = PQI_STATUS_FAILURE;
				goto free_mem;
			}
		}
	}

	/*
	 * Build the RAID-path IU.  The IU length is sized for exactly one
	 * SG descriptor, which is all a bounce buffer needs.
	 */
	request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
	request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
									PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
		sizeof(request.lun_number));
	memcpy(request.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
	request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;

	/*
	 * NOTE(review): WRITE maps to FROM_DEVICE and READ to TO_DEVICE,
	 * so the SOP direction appears to be expressed from the
	 * controller's point of view -- confirm against the PQI spec.
	 */
	switch (iocommand->Request.Type.Direction) {
	case PQIIOCTL_NONE:
		request.data_direction = SOP_DATA_DIR_NONE;
		break;
	case PQIIOCTL_WRITE:
		request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
		break;
	case PQIIOCTL_READ:
		request.data_direction = SOP_DATA_DIR_TO_DEVICE;
		break;
	case PQIIOCTL_BIDIRECTIONAL:
		request.data_direction = SOP_DATA_DIR_BIDIRECTIONAL;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	/* Single SG entry describing the whole bounce buffer. */
	if (iocommand->buf_size > 0) {
		request.buffer_length = iocommand->buf_size;
		request.sg_descriptors[0].addr = ioctl_dma_buf.dma_addr;
		request.sg_descriptors[0].len = iocommand->buf_size;
		request.sg_descriptors[0].flags =  SG_FLAG_LAST;
	}
	/* Reserve a command tag; it doubles as request id and error index. */
	tag = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == tag) {
		DBG_ERR("Tag not available\n");
		ret = PQI_STATUS_FAILURE;
		goto free_mem;
	}
	request.request_id = tag;
	request.response_queue_id = ob_q->q_id;
	request.error_index = request.request_id;
	if (softs->timeout_in_passthrough) {
		request.timeout_in_sec = iocommand->Request.Timeout;
	}

	/* Prepare the request control block for completion handling. */
	rcb = &softs->rcb[tag];
	rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
	rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
	rcb->tag = tag;
	rcb->req_pending = true;
	/* Submit Command */
	ret = pqisrc_submit_cmnd(softs, ib_q, &request);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_out;
	}

	/* Block until the firmware completes the request or we time out. */
	ret = pqisrc_wait_on_condition(softs, rcb,
			PQISRC_PASSTHROUGH_CMD_TIMEOUT);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Passthru IOCTL cmd timed out !!\n");
		goto err_out;
	}

	memset(&iocommand->error_info, 0, sizeof(iocommand->error_info));


	/*
	 * On error completion, copy the SCSI status and any sense (or
	 * response) data back to the caller, clamped to both the firmware
	 * error buffer and the user-visible SenseInfo field.
	 */
	if (rcb->status) {
		size_t sense_data_length;

		memcpy(&error_info, rcb->error_info, sizeof(error_info));
		iocommand->error_info.ScsiStatus = error_info.status;
		sense_data_length = error_info.sense_data_len;

		/* Fall back to response data when no sense data was returned. */
		if (!sense_data_length)
			sense_data_length = error_info.resp_data_len;

		if (sense_data_length &&
			(sense_data_length > sizeof(error_info.data)))
				sense_data_length = sizeof(error_info.data);

		if (sense_data_length) {
			if (sense_data_length >
				sizeof(iocommand->error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand->error_info.SenseInfo);
			memcpy (iocommand->error_info.SenseInfo,
					error_info.data, sense_data_length);
			iocommand->error_info.SenseLen = sense_data_length;
		}

		/* A data underflow still counts as a successful transfer. */
		if (error_info.data_out_result ==
				PQI_RAID_DATA_IN_OUT_UNDERFLOW){
			rcb->status = REQUEST_SUCCESS;
		}
	}

	/* For read-type requests, return the bounce-buffer contents. */
	if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 &&
		(iocommand->Request.Type.Direction & PQIIOCTL_READ)) {

		if ((ret = os_copy_to_user(softs, (void*)iocommand->buf,
			(void*)drv_buf, iocommand->buf_size, mode)) != 0) {
				DBG_ERR("Failed to copy the response\n");
				goto err_out;
		}
	}

	/* Success path: release the rcb, tag and DMA buffer. */
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);
	if (iocommand->buf_size > 0)
			os_dma_mem_free(softs,&ioctl_dma_buf);

	DBG_FUNC("OUT\n");
	return ret;
err_out:
	/* Failure after tag allocation: release the rcb and tag, then fall
	 * through to free the DMA buffer. */
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);

free_mem:
	if (iocommand->buf_size > 0)
		os_dma_mem_free(softs, &ioctl_dma_buf);

out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}
424