xref: /linux/drivers/pci/switch/switchtec.c (revision 17cfcb68af3bc7d5e8ae08779b1853310a2949f3)
// SPDX-License-Identifier: GPL-2.0
/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 */

#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/nospec.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static bool use_dma_mrpc = true;
module_param(use_dma_mrpc, bool, 0644);
MODULE_PARM_DESC(use_dma_mrpc,
		 "Enable the use of the DMA MRPC feature");

static int nirqs = 32;
module_param(nirqs, int, 0644);
MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful for NTB applications)");

static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);

struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);

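/*
 * Lifecycle of an MRPC command: a user starts out MRPC_IDLE, becomes
 * MRPC_QUEUED when a command is written, MRPC_RUNNING once it reaches
 * the head of the queue and is handed to the hardware, and MRPC_DONE
 * when completion is detected.  Reading the result returns it to
 * MRPC_IDLE.
 */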
enum mrpc_state {
	MRPC_IDLE = 0,
	MRPC_QUEUED,
	MRPC_RUNNING,
	MRPC_DONE,
};

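/*
 * One switchtec_user is allocated per open of the char device.  It is
 * kref counted because the mrpc_queue takes its own reference while a
 * command is in flight, so the structure can outlive the file
 * descriptor that submitted the command.
 */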
struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;

	struct completion comp;
	struct kref kref;
	struct list_head list;

	u32 cmd;
	u32 status;
	u32 return_code;
	size_t data_len;
	size_t read_len;
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;
};

static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
	if (!stuser)
		return ERR_PTR(-ENOMEM);

	get_device(&stdev->dev);
	stuser->stdev = stdev;
	kref_init(&stuser->kref);
	INIT_LIST_HEAD(&stuser->list);
	init_completion(&stuser->comp);
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return stuser;
}

static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}

static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}

static void stuser_set_state(struct switchtec_user *stuser,
			     enum mrpc_state state)
{
	/* requires the mrpc_mutex to already be held when called */

	static const char * const state_names[] = {
		[MRPC_IDLE] = "IDLE",
		[MRPC_QUEUED] = "QUEUED",
		[MRPC_RUNNING] = "RUNNING",
		[MRPC_DONE] = "DONE",
	};

	stuser->state = state;

	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s\n",
		stuser, state_names[state]);
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev);

static void flush_wc_buf(struct switchtec_dev *stdev)
{
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;

	/*
	 * The odb (outbound doorbell) register is handled by low-latency
	 * hardware and reading it has no side effects, so it is safe to
	 * read solely to flush the write-combining buffers.
	 */
	mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
	ioread32(&mmio_dbmsg->odb);
}

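/*
 * Submission sequence: copy the payload into the write-combined
 * input_data region of BAR0, flush the WC buffers, then write the
 * command number to the cmd register, which kicks off execution in
 * firmware.  The delayed work is a 500ms polling backstop in case the
 * completion interrupt is never delivered.
 */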
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc) {
		stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
		memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
	}

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	flush_wc_buf(stdev);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}

static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	/* reinit, not init: a poller may already be on comp's wait queue */
	reinit_completion(&stuser->comp);
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc)
		stuser->status = stdev->dma_mrpc->status;
	else
		stuser->status = ioread32(&stdev->mmio_mrpc->status);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	if (stdev->dma_mrpc)
		stuser->return_code = stdev->dma_mrpc->rtn_code;
	else
		stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	if (stdev->dma_mrpc)
		memcpy(stuser->data, &stdev->dma_mrpc->data,
			      stuser->read_len);
	else
		memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
			      stuser->read_len);
out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}

static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}

static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	if (stdev->dma_mrpc)
		status = stdev->dma_mrpc->status;
	else
		status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);
out:
	mutex_unlock(&stdev->mrpc_mutex);
}

static ssize_t device_version_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->device_version);

	return sprintf(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);

static ssize_t fw_version_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->firmware_version);

	return sprintf(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);

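/*
 * Copy a fixed-width, space-padded string register out of the GAS,
 * trim the trailing pad spaces, and terminate it with a newline and
 * NUL for sysfs consumption.
 */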
static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
{
	int i;

	memcpy_fromio(buf, attr, len);
	buf[len] = '\n';
	buf[len + 1] = 0;

	for (i = len - 1; i > 0; i--) {
		if (buf[i] != ' ')
			break;
		buf[i] = '\n';
		buf[i + 1] = 0;
	}

	return strlen(buf);
}

#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	return io_string_show(buf, &stdev->mmio_sys_info->field, \
			    sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);

static ssize_t component_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int id = ioread16(&stdev->mmio_sys_info->component_id);

	return sprintf(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);

static ssize_t component_revision_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int rev = ioread8(&stdev->mmio_sys_info->component_revision);

	return sprintf(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);

static ssize_t partition_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);

static ssize_t partition_count_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);

static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);

static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	stream_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}

static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}

static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
		return -EINTR;

	if (!stdev->alive) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -ENODEV;
	}

	return 0;
}

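/*
 * Char device protocol, as implemented by the write/read handlers
 * below: userspace writes a 4-byte MRPC command number followed by the
 * input payload, then reads back a 4-byte return code followed by the
 * output payload.  An illustrative userspace sketch (device path and
 * command number are hypothetical, error handling omitted):
 *
 *	int fd = open("/dev/switchtec0", O_RDWR);
 *	__u32 buf[1 + SWITCHTEC_MRPC_PAYLOAD_SIZE / 4] = { cmd };
 *
 *	write(fd, buf, sizeof(__u32) + input_len);
 *	read(fd, buf, sizeof(__u32) + output_len); // buf[0] = return code
 */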
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}

static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IDLE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	stuser->read_len = size - sizeof(stuser->return_code);

	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!try_wait_for_completion(&stuser->comp))
			return -EAGAIN;
	} else {
		rc = wait_for_completion_interruptible(&stuser->comp);
		if (rc < 0)
			return rc;
	}

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	stuser_set_state(stuser, MRPC_IDLE);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}

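/*
 * Poll semantics: EPOLLIN means an MRPC command submitted on this file
 * descriptor has completed and its result can be read; EPOLLPRI means
 * new switch events have arrived since this user last read an event
 * summary.
 */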
static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	__poll_t ret = 0;

	poll_wait(filp, &stuser->comp.wait, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (try_wait_for_completion(&stuser->comp))
		ret |= EPOLLIN | EPOLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= EPOLLPRI | EPOLLRDBAND;

	return ret;
}

static int ioctl_flash_info(struct switchtec_dev *stdev,
			    struct switchtec_ioctl_flash_info __user *uinfo)
{
	struct switchtec_ioctl_flash_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

	info.flash_length = ioread32(&fi->flash_length);
	info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}

static int ioctl_flash_part_info(struct switchtec_dev *stdev,
	struct switchtec_ioctl_flash_part_info __user *uinfo)
{
	struct switchtec_ioctl_flash_part_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
	u32 active_addr = -1;

	if (copy_from_user(&info, uinfo, sizeof(info)))
		return -EFAULT;

	switch (info.flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg0);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg1);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img0);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img1);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(&info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(&info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(&info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(&info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(&info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(&info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(&info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(&info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(&info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	if (info.address == active_addr)
		info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static int ioctl_event_summary(struct switchtec_dev *stdev,
	struct switchtec_user *stuser,
	struct switchtec_ioctl_event_summary __user *usum,
	size_t size)
{
	struct switchtec_ioctl_event_summary *s;
	int i;
	u32 reg;
	int ret = 0;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->global = ioread32(&stdev->mmio_sw_event->global_summary);
	s->part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
	s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s->part[i] = reg;
	}

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;

		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s->pff[i] = reg;
	}

	if (copy_to_user(usum, s, size)) {
		ret = -EFAULT;
		goto error_case;
	}

	stuser->event_cnt = atomic_read(&stdev->event_cnt);

error_case:
	kfree(s);
	return ret;
}

static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}

static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}

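/*
 * These macros expand to designated initializers for the event_regs[]
 * table below, mapping each ioctl event ID to the offset of its event
 * header within the global, per-partition or per-PFF register block
 * and to the helper that resolves that block's base address.
 */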
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

static const struct event_reg {
	size_t offset;
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
} event_regs[] = {
	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
	       twi_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
	       cli_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};

static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
				   int event_id, int index)
{
	size_t off;

	if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return ERR_PTR(-EINVAL);

	off = event_regs[event_id].offset;

	if (event_regs[event_id].map_reg == part_ev_reg) {
		if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
			index = stdev->partition;
		else if (index < 0 || index >= stdev->partition_count)
			return ERR_PTR(-EINVAL);
	} else if (event_regs[event_id].map_reg == pff_ev_reg) {
		if (index < 0 || index >= stdev->pff_csr_count)
			return ERR_PTR(-EINVAL);
	}

	return event_regs[event_id].map_reg(stdev, off, index);
}

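/*
 * Event header layout, per the SWITCHTEC_EVENT_* flags: bit 0 reads as
 * "event occurred" and is write-1-to-clear, bits 1-4 enable logging,
 * CLI notification, IRQ generation and fatal handling, and bits 5-12
 * hold the event counter.
 */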
static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;

	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	if (ctl->flags)
		iowrite32(hdr, reg);

	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}

static int ioctl_event_ctl(struct switchtec_dev *stdev,
	struct switchtec_ioctl_event_ctl __user *uctl)
{
	int ret;
	int nr_idxs;
	unsigned int event_flags;
	struct switchtec_ioctl_event_ctl ctl;

	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
		return -EFAULT;

	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return -EINVAL;

	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
		return -EINVAL;

	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
			nr_idxs = 1;
		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
			nr_idxs = stdev->partition_count;
		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
			nr_idxs = stdev->pff_csr_count;
		else
			return -EINVAL;

		event_flags = ctl.flags;
		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
			ctl.flags = event_flags;
			ret = event_ctl(stdev, &ctl);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = event_ctl(stdev, &ctl);
		if (ret < 0)
			return ret;
	}

	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
		return -EFAULT;

	return 0;
}

static int ioctl_pff_to_port(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port __user *up)
{
	int i, part;
	u32 reg;
	struct part_cfg_regs __iomem *pcfg;
	struct switchtec_ioctl_pff_port p;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	p.port = -1;
	for (part = 0; part < stdev->partition_count; part++) {
		pcfg = &stdev->mmio_part_cfg_all[part];
		p.partition = part;

		reg = ioread32(&pcfg->usp_pff_inst_id);
		if (reg == p.pff) {
			p.port = 0;
			break;
		}

		reg = ioread32(&pcfg->vep_pff_inst_id);
		if (reg == p.pff) {
			p.port = SWITCHTEC_IOCTL_PFF_VEP;
			break;
		}

		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
			if (reg != p.pff)
				continue;

			p.port = i + 1;
			break;
		}

		if (p.port != -1)
			break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static int ioctl_port_to_pff(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port __user *up)
{
	struct switchtec_ioctl_pff_port p;
	struct part_cfg_regs __iomem *pcfg;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
		pcfg = stdev->mmio_part_cfg;
	else if (p.partition < stdev->partition_count)
		pcfg = &stdev->mmio_part_cfg_all[p.partition];
	else
		return -EINVAL;

	switch (p.port) {
	case 0:
		p.pff = ioread32(&pcfg->usp_pff_inst_id);
		break;
	case SWITCHTEC_IOCTL_PFF_VEP:
		p.pff = ioread32(&pcfg->vep_pff_inst_id);
		break;
	default:
		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
			return -EINVAL;
		p.port = array_index_nospec(p.port,
					ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
		break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;
	void __user *argp = (void __user *)arg;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	switch (cmd) {
	case SWITCHTEC_IOCTL_FLASH_INFO:
		rc = ioctl_flash_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
		rc = ioctl_flash_part_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
		rc = ioctl_event_summary(stdev, stuser, argp,
					 sizeof(struct switchtec_ioctl_event_summary_legacy));
		break;
	case SWITCHTEC_IOCTL_EVENT_CTL:
		rc = ioctl_event_ctl(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PFF_TO_PORT:
		rc = ioctl_pff_to_port(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PORT_TO_PFF:
		rc = ioctl_port_to_pff(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
		rc = ioctl_event_summary(stdev, stuser, argp,
					 sizeof(struct switchtec_ioctl_event_summary));
		break;
	default:
		rc = -ENOTTY;
		break;
	}

	mutex_unlock(&stdev->mrpc_mutex);
	return rc;
}

static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = switchtec_dev_ioctl,
};

static void link_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, link_event_work);

	if (stdev->link_notifier)
		stdev->link_notifier(stdev);
}

static void check_link_state_events(struct switchtec_dev *stdev)
{
	int idx;
	u32 reg;
	int count;
	int occurred = 0;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
		dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
		count = (reg >> 5) & 0xFF;

		if (count != stdev->link_event_count[idx]) {
			occurred = 1;
			stdev->link_event_count[idx] = count;
		}
	}

	if (occurred)
		schedule_work(&stdev->link_event_work);
}

static void enable_link_state_events(struct switchtec_dev *stdev)
{
	int idx;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		iowrite32(SWITCHTEC_EVENT_CLEAR |
			  SWITCHTEC_EVENT_EN_IRQ,
			  &stdev->mmio_pff_csr[idx].link_state_hdr);
	}
}

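/*
 * With DMA MRPC enabled, the hardware deposits command status, return
 * code and output data into a coherent buffer in host memory, so
 * results can be read from RAM instead of over the BAR.
 */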
static void enable_dma_mrpc(struct switchtec_dev *stdev)
{
	writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
	flush_wc_buf(stdev);
	iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
}

static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	if (stdev->dma_mrpc) {
		iowrite32(0, &stdev->mmio_mrpc->dma_en);
		flush_wc_buf(stdev);
		writeq(0, &stdev->mmio_mrpc->dma_addr);
		dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
				stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
	}
	kfree(stdev);
}

static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}

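/*
 * The switchtec_dev embeds a struct device and must only be freed from
 * stdev_release() once the last reference is dropped via put_device().
 * stdev_create() only initializes the cdev; it is registered later in
 * the probe path with cdev_device_add().
 */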
static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	INIT_WORK(&stdev->link_event_work, link_event_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}

static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
{
	size_t off = event_regs[eid].offset;
	u32 __iomem *hdr_reg;
	u32 hdr;

	hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
	hdr = ioread32(hdr_reg);

	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
		return 0;

	if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
	    eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
		return 0;

	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
	iowrite32(hdr, hdr_reg);

	return 1;
}

static int mask_all_events(struct switchtec_dev *stdev, int eid)
{
	int idx;
	int count = 0;

	if (event_regs[eid].map_reg == part_ev_reg) {
		for (idx = 0; idx < stdev->partition_count; idx++)
			count += mask_event(stdev, eid, idx);
	} else if (event_regs[eid].map_reg == pff_ev_reg) {
		for (idx = 0; idx < stdev->pff_csr_count; idx++) {
			if (!stdev->pff_local[idx])
				continue;

			count += mask_event(stdev, eid, idx);
		}
	} else {
		count += mask_event(stdev, eid, 0);
	}

	return count;
}

static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	check_link_state_events(stdev);

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
		event_count += mask_all_events(stdev, eid);

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	irqreturn_t ret = IRQ_NONE;

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	schedule_work(&stdev->mrpc_work);

	ret = IRQ_HANDLED;
	return ret;
}

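/*
 * The device reports which vector it raises for event notifications in
 * the vep_vector_number register, and the DMA MRPC completion vector
 * in dma_vector.  PCI_IRQ_VIRTUAL permits allocating more vectors than
 * the function physically exposes, which the switchtec NTB driver
 * relies on.
 */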
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;
	int dma_mrpc_irq;
	int rc;

	if (nirqs < 4)
		nirqs = 4;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI |
				      PCI_IRQ_VIRTUAL);
	if (nvecs < 0)
		return nvecs;

	event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	rc = devm_request_irq(&stdev->pdev->dev, event_irq,
				switchtec_event_isr, 0,
				KBUILD_MODNAME, stdev);

	if (rc)
		return rc;

	if (!stdev->dma_mrpc)
		return rc;

	dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
	if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
		return -EFAULT;

	dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
	if (dma_mrpc_irq < 0)
		return dma_mrpc_irq;

	rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
				switchtec_dma_mrpc_isr, 0,
				KBUILD_MODNAME, stdev);

	return rc;
}

static void init_pff(struct switchtec_dev *stdev)
{
	int i;
	u32 reg;
	struct part_cfg_regs __iomem *pcfg = stdev->mmio_part_cfg;

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;
	}

	stdev->pff_csr_count = i;

	reg = ioread32(&pcfg->usp_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	reg = ioread32(&pcfg->vep_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
		if (reg < SWITCHTEC_MAX_PFF_CSR)
			stdev->pff_local[reg] = 1;
	}
}

static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;
	void __iomem *map;
	unsigned long res_start, res_len;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		return rc;

	pci_set_master(pdev);

	res_start = pci_resource_start(pdev, 0);
	res_len = pci_resource_len(pdev, 0);

	if (!devm_request_mem_region(&pdev->dev, res_start,
				     res_len, KBUILD_MODNAME))
		return -EBUSY;

	stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
					   SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!stdev->mmio_mrpc)
		return -ENOMEM;

	map = devm_ioremap(&pdev->dev,
			   res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
			   res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!map)
		return -ENOMEM;

	stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	if (!use_dma_mrpc)
		return 0;

	if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
		return 0;

	stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
					     sizeof(*stdev->dma_mrpc),
					     &stdev->dma_mrpc_dma_addr,
					     GFP_KERNEL);
	if (!stdev->dma_mrpc)
		return -ENOMEM;

	return 0;
}

static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
		request_module_nowait("ntb_hw_switchtec");

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	if (stdev->dma_mrpc)
		enable_dma_mrpc(stdev);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}

static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");
	stdev_kill(stdev);
	put_device(&stdev->dev);
}

#define SWITCHTEC_PCI_DEVICE(device_id) \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}, \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}

static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531),  //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532),  //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533),  //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534),  //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535),  //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536),  //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8541),  //PSX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8542),  //PSX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8543),  //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8551),  //PAX 24XG3
	SWITCHTEC_PCI_DEVICE(0x8552),  //PAX 32XG3
	SWITCHTEC_PCI_DEVICE(0x8553),  //PAX 48XG3
	SWITCHTEC_PCI_DEVICE(0x8554),  //PAX 64XG3
	SWITCHTEC_PCI_DEVICE(0x8555),  //PAX 80XG3
	SWITCHTEC_PCI_DEVICE(0x8556),  //PAX 96XG3
	SWITCHTEC_PCI_DEVICE(0x8561),  //PFXL 24XG3
	SWITCHTEC_PCI_DEVICE(0x8562),  //PFXL 32XG3
	SWITCHTEC_PCI_DEVICE(0x8563),  //PFXL 48XG3
	SWITCHTEC_PCI_DEVICE(0x8564),  //PFXL 64XG3
	SWITCHTEC_PCI_DEVICE(0x8565),  //PFXL 80XG3
	SWITCHTEC_PCI_DEVICE(0x8566),  //PFXL 96XG3
	SWITCHTEC_PCI_DEVICE(0x8571),  //PFXI 24XG3
	SWITCHTEC_PCI_DEVICE(0x8572),  //PFXI 32XG3
	SWITCHTEC_PCI_DEVICE(0x8573),  //PFXI 48XG3
	SWITCHTEC_PCI_DEVICE(0x8574),  //PFXI 64XG3
	SWITCHTEC_PCI_DEVICE(0x8575),  //PFXI 80XG3
	SWITCHTEC_PCI_DEVICE(0x8576),  //PFXI 96XG3
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);

static struct pci_driver switchtec_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= switchtec_pci_tbl,
	.probe		= switchtec_pci_probe,
	.remove		= switchtec_pci_remove,
};

static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);

static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);