/*-
 * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


/*
 * Management interface for the smartpqi driver.
 */

#include "smartpqi_includes.h"

/*
 * Wrapper function to copy data from kernel space to user space.
 */
int
os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
	void *src_buf, int size, int mode)
{
	return (copyout(src_buf, dest_buf, size));
}

/*
 * Wrapper function to copy data from user space to kernel space.
 */
int
os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
	void *src_buf, int size, int mode)
{
	return (copyin(src_buf, dest_buf, size));
}
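
/*
 * These wrappers exist so the OS-independent pqisrc core can move data to
 * and from user space without knowing the host OS; on FreeBSD they reduce
 * to copyout(9) and copyin(9).  The mode argument is unused here and appears
 * to be kept only for interface symmetry with other OS ports.
 */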

/*
 * Device open function for ioctl entry
 */
static int
smartpqi_open(struct cdev *cdev, int flags, int devtype,
	struct thread *td)
{
	return BSD_SUCCESS;
}

/*
 * Device close function for ioctl entry
 */
static int
smartpqi_close(struct cdev *cdev, int flags, int devtype,
	struct thread *td)
{
	return BSD_SUCCESS;
}

/*
 * ioctl for getting driver info
 */
static void
smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
{
	struct pqisrc_softstate *softs = cdev->si_drv1;
	pdriver_info driver_info = (pdriver_info)udata;

	DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);

	driver_info->major_version = PQISRC_DRIVER_MAJOR;
	driver_info->minor_version = PQISRC_DRIVER_MINOR;
	driver_info->release_version = PQISRC_DRIVER_RELEASE;
	driver_info->build_revision = PQISRC_DRIVER_REVISION;
	driver_info->max_targets = PQI_MAX_DEVICES - 1;
	driver_info->max_io = softs->max_io_for_scsi_ml;
	driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;

	DBG_FUNC("OUT\n");
}

/*
 * ioctl for getting controller PCI info
 */
static void
smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
{
	struct pqisrc_softstate *softs = cdev->si_drv1;
	device_t dev = softs->os_specific.pqi_dev;
	pqi_pci_info_t *pci_info = (pqi_pci_info_t *)udata;
	uint32_t sub_vendor = 0;
	uint32_t sub_device = 0;
	uint32_t vendor = 0;
	uint32_t device = 0;

	DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);

	pci_info->bus = pci_get_bus(dev);
	pci_info->dev_fn = pci_get_function(dev);
	pci_info->domain = pci_get_domain(dev);
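
	/*
	 * board_id and chip_id follow the cciss convention: the (subsystem)
	 * device ID goes in the upper 16 bits and the matching vendor ID in
	 * the lower 16 bits.
	 */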
	sub_vendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sub_device = pci_read_config(dev, PCIR_SUBDEV_0, 2);
	pci_info->board_id = ((sub_device << 16) & 0xffff0000) | sub_vendor;
	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor;

	DBG_FUNC("OUT\n");
}

static inline int
pqi_status_to_bsd_ioctl_status(int pqi_status)
{
	if (PQI_STATUS_SUCCESS == pqi_status)
		return BSD_SUCCESS;
	else
		return EIO;
}
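
/*
 * Every PQI failure code is collapsed to EIO on the way to user space;
 * detailed error information for pass-through commands travels back
 * through the command's error_info structure instead.
 */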

/*
 * ioctl entry point for user-space requests
 */
static int
smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
	int flags, struct thread *td)
{
	int bsd_status, pqi_status;
	struct pqisrc_softstate *softs = cdev->si_drv1;

	DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev);

	if (!udata) {
		DBG_ERR("udata is NULL\n");
		return EINVAL;
	}

	if (pqisrc_ctrl_offline(softs)) {
		return ENOTTY;
	}

	switch (cmd) {
	case CCISS_GETDRIVVER:
		smartpqi_get_driver_info_ioctl(udata, cdev);
		bsd_status = BSD_SUCCESS;
		break;
	case CCISS_GETPCIINFO:
		smartpqi_get_pci_info_ioctl(udata, cdev);
		bsd_status = BSD_SUCCESS;
		break;
	case SMARTPQI_PASS_THRU:
	case CCISS_PASSTHRU:
		pqi_status = pqisrc_passthru_ioctl(softs, udata, 0);
		bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status);
		break;
	case CCISS_REGNEWD:
		pqi_status = pqisrc_scan_devices(softs);
		bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status);
		break;
	default:
		DBG_WARN("IOCTL cmd 0x%lx not supported\n", cmd);
		bsd_status = ENOTTY;
		break;
	}

	DBG_FUNC("OUT error = %d\n", bsd_status);

	return bsd_status;
}

static struct cdevsw smartpqi_cdevsw =
{
	.d_version = D_VERSION,
	.d_open = smartpqi_open,
	.d_close = smartpqi_close,
	.d_ioctl = smartpqi_ioctl,
	.d_name = "smartpqi",
};
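
/*
 * Hypothetical userland usage (illustration only; the device index, open
 * flags, and error handling below are assumptions, not part of this driver):
 *
 *	pqi_pci_info_t info;
 *	int fd = open("/dev/smartpqi0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, CCISS_GETPCIINFO, &info) == 0)
 *		printf("chip id = 0x%x\n", info.chip_id);
 */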

/*
 * Function to create device node for ioctl
 */
int
create_char_dev(struct pqisrc_softstate *softs, int card_index)
{
	int error = BSD_SUCCESS;

	DBG_FUNC("IN idx = %d\n", card_index);

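	/*
	 * The node is owned root:operator with mode 0640, so management
	 * tools need appropriate privileges to open it.
	 */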
	softs->os_specific.cdev = make_dev(&smartpqi_cdevsw, card_index,
	    UID_ROOT, GID_OPERATOR, 0640, "smartpqi%u", card_index);
	if (softs->os_specific.cdev) {
		softs->os_specific.cdev->si_drv1 = softs;
	} else {
		error = ENXIO;
	}

	DBG_FUNC("OUT error = %d\n", error);

	return error;
}

/*
 * Function to destroy device node for ioctl
 */
void
destroy_char_dev(struct pqisrc_softstate *softs)
{
	DBG_FUNC("IN\n");
	if (softs->os_specific.cdev) {
		destroy_dev(softs->os_specific.cdev);
		softs->os_specific.cdev = NULL;
	}
	DBG_FUNC("OUT\n");
}

/*
 * Function used to send pass-through commands to the adapter
 * to support management tools such as ssacli and sscon.
 */
int
pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
{
	int ret;
	char *drv_buf = NULL;
	uint32_t tag = 0;
	IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg;
	dma_mem_t ioctl_dma_buf;
	pqisrc_raid_req_t request;
	raid_path_error_info_elem_t error_info;
	ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
	ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
	rcb_t *rcb = NULL;

	memset(&request, 0, sizeof(request));
	memset(&error_info, 0, sizeof(error_info));

	DBG_FUNC("IN\n");

	if (pqisrc_ctrl_offline(softs))
		return PQI_STATUS_FAILURE;

	if (!arg)
		return PQI_STATUS_FAILURE;

	if (iocommand->buf_size < 1 &&
	    iocommand->Request.Type.Direction != PQIIOCTL_NONE)
		return PQI_STATUS_FAILURE;
	if (iocommand->Request.CDBLen > sizeof(request.cmd.cdb))
		return PQI_STATUS_FAILURE;

	switch (iocommand->Request.Type.Direction) {
	case PQIIOCTL_NONE:
	case PQIIOCTL_WRITE:
	case PQIIOCTL_READ:
	case PQIIOCTL_BIDIRECTIONAL:
		break;
	default:
		return PQI_STATUS_FAILURE;
	}
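
	/*
	 * For commands that move data, stage the transfer through a
	 * driver-owned DMA buffer: allocate it here and, for writes, copy
	 * the caller's data in before the request is issued.
	 */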

	if (iocommand->buf_size > 0) {
		memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem));
		os_strlcpy(ioctl_dma_buf.tag, "Ioctl_PassthruCmd_Buffer",
		    sizeof(ioctl_dma_buf.tag));
		ioctl_dma_buf.size = iocommand->buf_size;
		ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
		/* Allocate DMA-able memory for the transfer. */
		ret = os_dma_mem_alloc(softs, &ioctl_dma_buf);
		if (ret) {
			DBG_ERR("Failed to allocate DMA memory for ioctl pass-through buffer: %d\n", ret);
			goto out;
		}

		DBG_IO("ioctl_dma_buf.dma_addr = %p\n", (void *)ioctl_dma_buf.dma_addr);
		DBG_IO("ioctl_dma_buf.virt_addr = %p\n", (void *)ioctl_dma_buf.virt_addr);

		drv_buf = (char *)ioctl_dma_buf.virt_addr;
		if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
			ret = os_copy_from_user(softs, (void *)drv_buf,
			    (void *)iocommand->buf, iocommand->buf_size, mode);
			if (ret != 0) {
				goto free_mem;
			}
		}
	}
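
	/* Build the RAID path request IU around the caller's LUN and CDB. */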
	request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
	request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
	    PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
	    sizeof(request.lun_number));
	memcpy(request.cmd.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
	request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
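
	/*
	 * Translate the ioctl direction into the SOP data-direction encoding
	 * carried in the request IU.
	 */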
	switch (iocommand->Request.Type.Direction) {
	case PQIIOCTL_NONE:
		request.data_direction = SOP_DATA_DIR_NONE;
		break;
	case PQIIOCTL_WRITE:
		request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
		break;
	case PQIIOCTL_READ:
		request.data_direction = SOP_DATA_DIR_TO_DEVICE;
		break;
	case PQIIOCTL_BIDIRECTIONAL:
		request.data_direction = SOP_DATA_DIR_BIDIRECTIONAL;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	if (iocommand->buf_size > 0) {
		request.buffer_length = iocommand->buf_size;
		request.sg_descriptors[0].addr = ioctl_dma_buf.dma_addr;
		request.sg_descriptors[0].len = iocommand->buf_size;
		request.sg_descriptors[0].flags = SG_FLAG_LAST;
	}
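
	/*
	 * Reserve a request tag and set up the request control block so the
	 * completion callbacks can find this command and wake the waiter.
	 */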
	tag = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == tag) {
		DBG_ERR("Tag not available\n");
		goto free_mem;
	}
	request.request_id = tag;
	request.response_queue_id = ob_q->q_id;
	request.error_index = request.request_id;
	if (softs->timeout_in_passthrough) {
		request.timeout_in_sec = iocommand->Request.Timeout;
	}

	rcb = &softs->rcb[tag];
	rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
	rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
	rcb->tag = tag;
	rcb->req_pending = true;
	/* Submit the command and wait for it to complete. */
	ret = pqisrc_submit_cmnd(softs, ib_q, &request);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_out;
	}

	ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_PASSTHROUGH_CMD_TIMEOUT);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Passthru IOCTL cmd timed out\n");
		goto err_out;
	}
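
	/*
	 * Decode any error information returned by the firmware: report the
	 * SCSI status and copy back as much sense (or response) data as fits
	 * in the caller's buffer.  A data underflow is downgraded to success.
	 */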

	memset(&iocommand->error_info, 0, sizeof(iocommand->error_info));

	if (rcb->status) {
		size_t sense_data_length;

		memcpy(&error_info, rcb->error_info, sizeof(error_info));
		iocommand->error_info.ScsiStatus = error_info.status;
		sense_data_length = error_info.sense_data_len;

		if (!sense_data_length)
			sense_data_length = error_info.resp_data_len;

		if (sense_data_length &&
		    (sense_data_length > sizeof(error_info.data)))
			sense_data_length = sizeof(error_info.data);

		if (sense_data_length) {
			if (sense_data_length >
			    sizeof(iocommand->error_info.SenseInfo))
				sense_data_length =
				    sizeof(iocommand->error_info.SenseInfo);
			memcpy(iocommand->error_info.SenseInfo,
			    error_info.data, sense_data_length);
			iocommand->error_info.SenseLen = sense_data_length;
		}

		if (error_info.data_out_result == PQI_RAID_DATA_IN_OUT_UNDERFLOW) {
			rcb->status = PQI_STATUS_SUCCESS;
		}
	}
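
	/* For successful reads, copy the fetched data back to the caller. */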

	if (rcb->status == PQI_STATUS_SUCCESS && iocommand->buf_size > 0 &&
	    (iocommand->Request.Type.Direction & PQIIOCTL_READ)) {

		ret = os_copy_to_user(softs, (void *)iocommand->buf,
		    (void *)drv_buf, iocommand->buf_size, mode);
		if (ret != 0) {
			DBG_ERR("Failed to copy the response\n");
			goto err_out;
		}
	}

	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);
	if (iocommand->buf_size > 0)
		os_dma_mem_free(softs, &ioctl_dma_buf);

	DBG_FUNC("OUT\n");
	return PQI_STATUS_SUCCESS;

err_out:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);

free_mem:
	if (iocommand->buf_size > 0)
		os_dma_mem_free(softs, &ioctl_dma_buf);

out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}