1 /*
2  * Microsemi Switchtec(tm) PCIe Management Driver
3  * Copyright (c) 2017, Microsemi Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  */
15 
16 #include <linux/switchtec.h>
17 #include <linux/module.h>
18 #include <linux/delay.h>
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21 #include <linux/ntb.h>
22 
23 MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
24 MODULE_VERSION("0.1");
25 MODULE_LICENSE("GPL");
26 MODULE_AUTHOR("Microsemi Corporation");
27 
28 static ulong max_mw_size = SZ_2M;
29 module_param(max_mw_size, ulong, 0644);
30 MODULE_PARM_DESC(max_mw_size,
31 	"Max memory window size reported to the upper layer");
32 
33 static bool use_lut_mws;
34 module_param(use_lut_mws, bool, 0644);
35 MODULE_PARM_DESC(use_lut_mws,
36 		 "Enable the use of the LUT based memory windows");
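/*
 * Illustrative usage note (not part of the original driver): both
 * parameters can be set at module load time, for example
 *
 *	modprobe ntb_hw_switchtec max_mw_size=0x400000 use_lut_mws=1
 *
 * which would cap the reported direct window size at 4 MB and enable
 * the LUT based windows. The values are examples only.
 */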
37 
38 #ifndef ioread64
39 #ifdef readq
40 #define ioread64 readq
41 #else
42 #define ioread64 _ioread64
43 static inline u64 _ioread64(void __iomem *mmio)
44 {
45 	u64 low, high;
46 
47 	low = ioread32(mmio);
48 	high = ioread32(mmio + sizeof(u32));
49 	return low | (high << 32);
50 }
51 #endif
52 #endif
53 
54 #ifndef iowrite64
55 #ifdef writeq
56 #define iowrite64 writeq
57 #else
58 #define iowrite64 _iowrite64
59 static inline void _iowrite64(u64 val, void __iomem *mmio)
60 {
61 	iowrite32(val, mmio);
62 	iowrite32(val >> 32, mmio + sizeof(u32));
63 }
64 #endif
65 #endif
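/*
 * Note added for clarity: when the architecture does not provide
 * readq/writeq, the fallbacks above emulate a 64-bit MMIO access with
 * two 32-bit accesses (low dword first). The combined access is not
 * atomic, which the registers accessed through these helpers are
 * assumed to tolerate.
 */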
66 
67 #define SWITCHTEC_NTB_MAGIC 0x45CC0001
68 #define MAX_MWS     128
69 
70 struct shared_mw {
71 	u32 magic;
72 	u32 link_sta;
73 	u32 partition_id;
74 	u64 mw_sizes[MAX_MWS];
75 	u32 spad[128];
76 };
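/*
 * Added note: struct shared_mw is the layout of the 64 KB page each side
 * exposes to its peer through the reserved LUT window (see
 * switchtec_ntb_init_shared_mw()). The magic and link_sta words carry a
 * simple link handshake, mw_sizes[] advertises the local windows and
 * spad[] backs the scratchpad API, so both peers must agree on this
 * layout.
 */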
77 
78 #define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
79 #define LUT_SIZE SZ_64K
80 
81 struct switchtec_ntb {
82 	struct ntb_dev ntb;
83 	struct switchtec_dev *stdev;
84 
85 	int self_partition;
86 	int peer_partition;
87 
88 	int doorbell_irq;
89 	int message_irq;
90 
91 	struct ntb_info_regs __iomem *mmio_ntb;
92 	struct ntb_ctrl_regs __iomem *mmio_ctrl;
93 	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
94 	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
95 	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
96 	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
97 
98 	struct shared_mw *self_shared;
99 	struct shared_mw __iomem *peer_shared;
100 	dma_addr_t self_shared_dma;
101 
102 	u64 db_mask;
103 	u64 db_valid_mask;
104 	int db_shift;
105 	int db_peer_shift;
106 
107 	/* synchronize rmw access of db_mask and hw reg */
108 	spinlock_t db_mask_lock;
109 
110 	int nr_direct_mw;
111 	int nr_lut_mw;
112 	int nr_rsvd_luts;
113 	int direct_mw_to_bar[MAX_DIRECT_MW];
114 
115 	int peer_nr_direct_mw;
116 	int peer_nr_lut_mw;
117 	int peer_direct_mw_to_bar[MAX_DIRECT_MW];
118 
119 	bool link_is_up;
120 	enum ntb_speed link_speed;
121 	enum ntb_width link_width;
122 };
123 
124 static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
125 {
126 	return container_of(ntb, struct switchtec_ntb, ntb);
127 }
128 
129 static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
130 				 struct ntb_ctrl_regs __iomem *ctl,
131 				 u32 op, int wait_status)
132 {
133 	static const char * const op_text[] = {
134 		[NTB_CTRL_PART_OP_LOCK] = "lock",
135 		[NTB_CTRL_PART_OP_CFG] = "configure",
136 		[NTB_CTRL_PART_OP_RESET] = "reset",
137 	};
138 
139 	int i;
140 	u32 ps;
141 	int status;
142 
143 	switch (op) {
144 	case NTB_CTRL_PART_OP_LOCK:
145 		status = NTB_CTRL_PART_STATUS_LOCKING;
146 		break;
147 	case NTB_CTRL_PART_OP_CFG:
148 		status = NTB_CTRL_PART_STATUS_CONFIGURING;
149 		break;
150 	case NTB_CTRL_PART_OP_RESET:
151 		status = NTB_CTRL_PART_STATUS_RESETTING;
152 		break;
153 	default:
154 		return -EINVAL;
155 	}
156 
157 	iowrite32(op, &ctl->partition_op);
158 
159 	for (i = 0; i < 1000; i++) {
160 		if (msleep_interruptible(50) != 0) {
161 			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
162 			return -EINTR;
163 		}
164 
165 		ps = ioread32(&ctl->partition_status) & 0xFFFF;
166 
167 		if (ps != status)
168 			break;
169 	}
170 
171 	if (ps == wait_status)
172 		return 0;
173 
174 	if (ps == status) {
175 		dev_err(&sndev->stdev->dev,
176 			"Timed out while performing %s (%d). (%08x)\n",
177 			op_text[op], op,
178 			ioread32(&ctl->partition_status));
179 
180 		return -ETIMEDOUT;
181 	}
182 
183 	return -EIO;
184 }
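/*
 * Added usage sketch: callers of this helper follow a lock / program /
 * configure pattern, roughly:
 *
 *	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
 *				   NTB_CTRL_PART_STATUS_LOCKED);
 *	if (rc)
 *		return rc;
 *	... program bar_entry / lut_entry registers ...
 *	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
 *				   NTB_CTRL_PART_STATUS_NORMAL);
 *
 * as done in switchtec_ntb_mw_set_trans() and config_rsvd_lut_win().
 */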
185 
186 static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
187 				  u32 val)
188 {
189 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg))
190 		return -EINVAL;
191 
192 	iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg);
193 
194 	return 0;
195 }
196 
197 static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
198 {
199 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
200 	int nr_direct_mw = sndev->peer_nr_direct_mw;
201 	int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;
202 
203 	if (pidx != NTB_DEF_PEER_IDX)
204 		return -EINVAL;
205 
206 	if (!use_lut_mws)
207 		nr_lut_mw = 0;
208 
209 	return nr_direct_mw + nr_lut_mw;
210 }
211 
212 static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
213 {
214 	return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
215 }
216 
217 static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
218 {
219 	return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
220 }
221 
222 static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
223 				      int widx, resource_size_t *addr_align,
224 				      resource_size_t *size_align,
225 				      resource_size_t *size_max)
226 {
227 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
228 	int lut;
229 	resource_size_t size;
230 
231 	if (pidx != NTB_DEF_PEER_IDX)
232 		return -EINVAL;
233 
234 	lut = widx >= sndev->peer_nr_direct_mw;
235 	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
236 
237 	if (size == 0)
238 		return -EINVAL;
239 
240 	if (addr_align)
241 		*addr_align = lut ? size : SZ_4K;
242 
243 	if (size_align)
244 		*size_align = lut ? size : SZ_4K;
245 
246 	if (size_max)
247 		*size_max = size;
248 
249 	return 0;
250 }
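/*
 * Added example: a direct window only requires 4 KB address and size
 * alignment, while a LUT window must be both aligned to and sized as
 * the reported window size. So a 64 KB LUT window needs a 64 KB
 * aligned, 64 KB translation, whereas a 2 MB direct window only needs
 * a 4 KB aligned address. The sizes are illustrative.
 */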
251 
252 static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
253 {
254 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
255 	int bar = sndev->peer_direct_mw_to_bar[idx];
256 	u32 ctl_val;
257 
258 	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
259 	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
260 	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
261 	iowrite32(0, &ctl->bar_entry[bar].win_size);
262 	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
263 }
264 
265 static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
266 {
267 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
268 
269 	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
270 }
271 
272 static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
273 					dma_addr_t addr, resource_size_t size)
274 {
275 	int xlate_pos = ilog2(size);
276 	int bar = sndev->peer_direct_mw_to_bar[idx];
277 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
278 	u32 ctl_val;
279 
280 	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
281 	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
282 
283 	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
284 	iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
285 	iowrite64(sndev->self_partition | addr,
286 		  &ctl->bar_entry[bar].xlate_addr);
287 }
288 
289 static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
290 				     dma_addr_t addr, resource_size_t size)
291 {
292 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
293 
294 	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
295 		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
296 }
297 
298 static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
299 				      dma_addr_t addr, resource_size_t size)
300 {
301 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
302 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
303 	int xlate_pos = ilog2(size);
304 	int nr_direct_mw = sndev->peer_nr_direct_mw;
305 	int rc;
306 
307 	if (pidx != NTB_DEF_PEER_IDX)
308 		return -EINVAL;
309 
310 	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
311 		widx, pidx, &addr, &size);
312 
313 	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
314 		return -EINVAL;
315 
316 	if (xlate_pos < 12)
317 		return -EINVAL;
318 
319 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
320 				   NTB_CTRL_PART_STATUS_LOCKED);
321 	if (rc)
322 		return rc;
323 
324 	if (addr == 0 || size == 0) {
325 		if (widx < nr_direct_mw)
326 			switchtec_ntb_mw_clr_direct(sndev, widx);
327 		else
328 			switchtec_ntb_mw_clr_lut(sndev, widx);
329 	} else {
330 		if (widx < nr_direct_mw)
331 			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
332 		else
333 			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
334 	}
335 
336 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
337 				   NTB_CTRL_PART_STATUS_NORMAL);
338 
339 	if (rc == -EIO) {
340 		dev_err(&sndev->stdev->dev,
341 			"Hardware reported an error configuring mw %d: %08x\n",
342 			widx, ioread32(&ctl->bar_error));
343 
344 		if (widx < nr_direct_mw)
345 			switchtec_ntb_mw_clr_direct(sndev, widx);
346 		else
347 			switchtec_ntb_mw_clr_lut(sndev, widx);
348 
349 		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
350 				      NTB_CTRL_PART_STATUS_NORMAL);
351 	}
352 
353 	return rc;
354 }
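/*
 * Added client-side sketch (uses the generic NTB API from <linux/ntb.h>;
 * shown for illustration only): an NTB client would typically back a
 * window with a DMA buffer and program the translation with
 *
 *	buf = dma_alloc_coherent(&ntb->pdev->dev, size, &dma_addr,
 *				 GFP_KERNEL);
 *	rc = ntb_mw_set_trans(ntb, NTB_DEF_PEER_IDX, widx, dma_addr, size);
 *
 * where size satisfies the constraints reported by ntb_mw_get_align(),
 * and tear the window down again by passing addr == 0 and size == 0.
 */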
355 
356 static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
357 {
358 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
359 	int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;
360 
361 	return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
362 }
363 
364 static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
365 					 int idx, phys_addr_t *base,
366 					 resource_size_t *size)
367 {
368 	int bar = sndev->direct_mw_to_bar[idx];
369 	size_t offset = 0;
370 
371 	if (bar < 0)
372 		return -EINVAL;
373 
374 	if (idx == 0) {
375 		/*
376 		 * This is the direct BAR shared with the LUTs
377 		 * which means the actual window will be offset
378 		 * by the size of all the LUT entries.
379 		 */
380 
381 		offset = LUT_SIZE * sndev->nr_lut_mw;
382 	}
383 
384 	if (base)
385 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
386 
387 	if (size) {
388 		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
389 		if (offset && *size > offset)
390 			*size = offset;
391 
392 		if (*size > max_mw_size)
393 			*size = max_mw_size;
394 	}
395 
396 	return 0;
397 }
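/*
 * Added example: if the shared BAR backs two LUT windows, the first
 * 128 KB (2 * LUT_SIZE) of the BAR hold the LUT regions, so direct
 * MW 0 starts 128 KB into the BAR and its reported size is reduced
 * accordingly. The numbers are illustrative only.
 */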
398 
399 static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
400 				      int idx, phys_addr_t *base,
401 				      resource_size_t *size)
402 {
403 	int bar = sndev->direct_mw_to_bar[0];
404 	int offset;
405 
406 	offset = LUT_SIZE * lut_index(sndev, idx);
407 
408 	if (base)
409 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
410 
411 	if (size)
412 		*size = LUT_SIZE;
413 
414 	return 0;
415 }
416 
417 static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
418 					  phys_addr_t *base,
419 					  resource_size_t *size)
420 {
421 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
422 
423 	if (idx < sndev->nr_direct_mw)
424 		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
425 	else if (idx < switchtec_ntb_peer_mw_count(ntb))
426 		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
427 	else
428 		return -EINVAL;
429 }
430 
431 static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
432 					  int partition,
433 					  enum ntb_speed *speed,
434 					  enum ntb_width *width)
435 {
436 	struct switchtec_dev *stdev = sndev->stdev;
437 
438 	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
439 	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
440 
441 	if (speed)
442 		*speed = (linksta >> 16) & 0xF;
443 
444 	if (width)
445 		*width = (linksta >> 20) & 0x3F;
446 }
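/*
 * Added note (inferred from the bit layout used above): the dword read
 * from pci_cap_region[13] appears to combine the PCIe Link Control
 * register in its low 16 bits and Link Status in its high 16 bits, so
 * bits 19:16 hold the current link speed and bits 25:20 the negotiated
 * width. An x8 Gen3 link would therefore report speed 3 and width 8.
 */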
447 
448 static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
449 {
450 	enum ntb_speed self_speed, peer_speed;
451 	enum ntb_width self_width, peer_width;
452 
453 	if (!sndev->link_is_up) {
454 		sndev->link_speed = NTB_SPEED_NONE;
455 		sndev->link_width = NTB_WIDTH_NONE;
456 		return;
457 	}
458 
459 	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
460 				      &self_speed, &self_width);
461 	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
462 				      &peer_speed, &peer_width);
463 
464 	sndev->link_speed = min(self_speed, peer_speed);
465 	sndev->link_width = min(self_width, peer_width);
466 }
467 
468 enum {
469 	LINK_MESSAGE = 0,
470 	MSG_LINK_UP = 1,
471 	MSG_LINK_DOWN = 2,
472 	MSG_CHECK_LINK = 3,
473 };
474 
475 static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
476 {
477 	int link_sta;
478 	int old = sndev->link_is_up;
479 
480 	link_sta = sndev->self_shared->link_sta;
481 	if (link_sta) {
482 		u64 peer = ioread64(&sndev->peer_shared->magic);
483 
484 		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
485 			link_sta = peer >> 32;
486 		else
487 			link_sta = 0;
488 	}
489 
490 	sndev->link_is_up = link_sta;
491 	switchtec_ntb_set_link_speed(sndev);
492 
493 	if (link_sta != old) {
494 		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
495 		ntb_link_event(&sndev->ntb);
496 		dev_info(&sndev->stdev->dev, "ntb link %s\n",
497 			 link_sta ? "up" : "down");
498 	}
499 }
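/*
 * Added note on the handshake in switchtec_ntb_check_link() above: each
 * side publishes its intent in self_shared->link_sta, and the peer's
 * view is obtained with a single 64-bit read covering the peer's magic
 * (low 32 bits) and link_sta (high 32 bits). The link is reported up
 * only when both sides have set link_sta and the peer's magic matches
 * SWITCHTEC_NTB_MAGIC.
 */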
500 
501 static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
502 {
503 	struct switchtec_ntb *sndev = stdev->sndev;
504 
505 	switchtec_ntb_check_link(sndev);
506 }
507 
508 static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
509 				    enum ntb_speed *speed,
510 				    enum ntb_width *width)
511 {
512 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
513 
514 	if (speed)
515 		*speed = sndev->link_speed;
516 	if (width)
517 		*width = sndev->link_width;
518 
519 	return sndev->link_is_up;
520 }
521 
522 static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
523 				     enum ntb_speed max_speed,
524 				     enum ntb_width max_width)
525 {
526 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
527 
528 	dev_dbg(&sndev->stdev->dev, "enabling link\n");
529 
530 	sndev->self_shared->link_sta = 1;
531 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
532 
533 	switchtec_ntb_check_link(sndev);
534 
535 	return 0;
536 }
537 
538 static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
539 {
540 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
541 
542 	dev_dbg(&sndev->stdev->dev, "disabling link\n");
543 
544 	sndev->self_shared->link_sta = 0;
545 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);
546 
547 	switchtec_ntb_check_link(sndev);
548 
549 	return 0;
550 }
551 
552 static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
553 {
554 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
555 
556 	return sndev->db_valid_mask;
557 }
558 
559 static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
560 {
561 	return 1;
562 }
563 
564 static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
565 {
566 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
567 
568 	if (db_vector < 0 || db_vector > 1)
569 		return 0;
570 
571 	return sndev->db_valid_mask;
572 }
573 
574 static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
575 {
576 	u64 ret;
577 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
578 
579 	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
580 
581 	return ret & sndev->db_valid_mask;
582 }
583 
584 static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
585 {
586 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
587 
588 	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
589 
590 	return 0;
591 }
592 
593 static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
594 {
595 	unsigned long irqflags;
596 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
597 
598 	if (db_bits & ~sndev->db_valid_mask)
599 		return -EINVAL;
600 
601 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
602 
603 	sndev->db_mask |= db_bits << sndev->db_shift;
604 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
605 
606 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
607 
608 	return 0;
609 }
610 
611 static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
612 {
613 	unsigned long irqflags;
614 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
615 
616 	if (db_bits & ~sndev->db_valid_mask)
617 		return -EINVAL;
618 
619 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
620 
621 	sndev->db_mask &= ~(db_bits << sndev->db_shift);
622 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
623 
624 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
625 
626 	return 0;
627 }
628 
629 static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
630 {
631 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
632 
633 	return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
634 }
635 
636 static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
637 				      phys_addr_t *db_addr,
638 				      resource_size_t *db_size)
639 {
640 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
641 	unsigned long offset;
642 
643 	offset = (unsigned long)sndev->mmio_self_dbmsg->odb -
644 		(unsigned long)sndev->stdev->mmio;
645 
646 	offset += sndev->db_shift / 8;
647 
648 	if (db_addr)
649 		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
650 	if (db_size)
651 		*db_size = sizeof(u32);
652 
653 	return 0;
654 }
655 
656 static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
657 {
658 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
659 
660 	iowrite64(db_bits << sndev->db_peer_shift,
661 		  &sndev->mmio_self_dbmsg->odb);
662 
663 	return 0;
664 }
665 
666 static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
667 {
668 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
669 
670 	return ARRAY_SIZE(sndev->self_shared->spad);
671 }
672 
673 static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
674 {
675 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
676 
677 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
678 		return 0;
679 
680 	if (!sndev->self_shared)
681 		return 0;
682 
683 	return sndev->self_shared->spad[idx];
684 }
685 
686 static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
687 {
688 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
689 
690 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
691 		return -EINVAL;
692 
693 	if (!sndev->self_shared)
694 		return -EIO;
695 
696 	sndev->self_shared->spad[idx] = val;
697 
698 	return 0;
699 }
700 
701 static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
702 					int sidx)
703 {
704 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
705 
706 	if (pidx != NTB_DEF_PEER_IDX)
707 		return -EINVAL;
708 
709 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
710 		return 0;
711 
712 	if (!sndev->peer_shared)
713 		return 0;
714 
715 	return ioread32(&sndev->peer_shared->spad[sidx]);
716 }
717 
718 static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
719 					 int sidx, u32 val)
720 {
721 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
722 
723 	if (pidx != NTB_DEF_PEER_IDX)
724 		return -EINVAL;
725 
726 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
727 		return -EINVAL;
728 
729 	if (!sndev->peer_shared)
730 		return -EIO;
731 
732 	iowrite32(val, &sndev->peer_shared->spad[sidx]);
733 
734 	return 0;
735 }
736 
737 static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
738 					int sidx, phys_addr_t *spad_addr)
739 {
740 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
741 	unsigned long offset;
742 
743 	if (pidx != NTB_DEF_PEER_IDX)
744 		return -EINVAL;
745 
746 	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
747 		(unsigned long)sndev->stdev->mmio;
748 
749 	if (spad_addr)
750 		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
751 
752 	return 0;
753 }
754 
755 static const struct ntb_dev_ops switchtec_ntb_ops = {
756 	.mw_count		= switchtec_ntb_mw_count,
757 	.mw_get_align		= switchtec_ntb_mw_get_align,
758 	.mw_set_trans		= switchtec_ntb_mw_set_trans,
759 	.peer_mw_count		= switchtec_ntb_peer_mw_count,
760 	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
761 	.link_is_up		= switchtec_ntb_link_is_up,
762 	.link_enable		= switchtec_ntb_link_enable,
763 	.link_disable		= switchtec_ntb_link_disable,
764 	.db_valid_mask		= switchtec_ntb_db_valid_mask,
765 	.db_vector_count	= switchtec_ntb_db_vector_count,
766 	.db_vector_mask		= switchtec_ntb_db_vector_mask,
767 	.db_read		= switchtec_ntb_db_read,
768 	.db_clear		= switchtec_ntb_db_clear,
769 	.db_set_mask		= switchtec_ntb_db_set_mask,
770 	.db_clear_mask		= switchtec_ntb_db_clear_mask,
771 	.db_read_mask		= switchtec_ntb_db_read_mask,
772 	.peer_db_addr		= switchtec_ntb_peer_db_addr,
773 	.peer_db_set		= switchtec_ntb_peer_db_set,
774 	.spad_count		= switchtec_ntb_spad_count,
775 	.spad_read		= switchtec_ntb_spad_read,
776 	.spad_write		= switchtec_ntb_spad_write,
777 	.peer_spad_read		= switchtec_ntb_peer_spad_read,
778 	.peer_spad_write	= switchtec_ntb_peer_spad_write,
779 	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
780 };
781 
782 static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
783 {
784 	u64 tpart_vec;
785 	int self;
786 	u64 part_map;
787 	int bit;
788 
789 	sndev->ntb.pdev = sndev->stdev->pdev;
790 	sndev->ntb.topo = NTB_TOPO_SWITCH;
791 	sndev->ntb.ops = &switchtec_ntb_ops;
792 
793 	sndev->self_partition = sndev->stdev->partition;
794 
795 	sndev->mmio_ntb = sndev->stdev->mmio_ntb;
796 
797 	self = sndev->self_partition;
798 	tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
799 	tpart_vec <<= 32;
800 	tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
801 
802 	part_map = ioread64(&sndev->mmio_ntb->ep_map);
803 	part_map &= ~(1ULL << sndev->self_partition);
804 
805 	if (!ffs(tpart_vec)) {
806 		if (sndev->stdev->partition_count != 2) {
807 			dev_err(&sndev->stdev->dev,
808 				"ntb target partition not defined\n");
809 			return -ENODEV;
810 		}
811 
812 		bit = ffs(part_map);
813 		if (!bit) {
814 			dev_err(&sndev->stdev->dev,
815 				"peer partition is not NT partition\n");
816 			return -ENODEV;
817 		}
818 
819 		sndev->peer_partition = bit - 1;
820 	} else {
821 		if (ffs(tpart_vec) != fls(tpart_vec)) {
822 			dev_err(&sndev->stdev->dev,
823 				"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
824 			return -ENODEV;
825 		}
826 
827 		sndev->peer_partition = ffs(tpart_vec) - 1;
828 		if (!(part_map & (1ULL << sndev->peer_partition))) {
829 			dev_err(&sndev->stdev->dev,
830 				"ntb target partition is not NT partition\n");
831 			return -ENODEV;
832 		}
833 	}
834 
835 	dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
836 		sndev->self_partition, sndev->stdev->partition_count);
837 
838 	sndev->mmio_ctrl = (void __iomem *)sndev->mmio_ntb +
839 		SWITCHTEC_NTB_REG_CTRL_OFFSET;
840 	sndev->mmio_dbmsg = (void __iomem *)sndev->mmio_ntb +
841 		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
842 
843 	sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
844 	sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
845 	sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
846 
847 	return 0;
848 }
849 
850 static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
851 			       struct ntb_ctrl_regs __iomem *ctl,
852 			       int lut_idx, int partition,
853 			       dma_addr_t addr)
854 {
855 	int peer_bar = sndev->peer_direct_mw_to_bar[0];
856 	u32 ctl_val;
857 	int rc;
858 
859 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
860 				   NTB_CTRL_PART_STATUS_LOCKED);
861 	if (rc)
862 		return rc;
863 
864 	ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
865 	ctl_val &= 0xFF;
866 	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
867 	ctl_val |= ilog2(LUT_SIZE) << 8;
868 	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
869 	iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);
870 
871 	iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
872 		  &ctl->lut_entry[lut_idx]);
873 
874 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
875 				   NTB_CTRL_PART_STATUS_NORMAL);
876 	if (rc) {
877 		u32 bar_error, lut_error;
878 
879 		bar_error = ioread32(&ctl->bar_error);
880 		lut_error = ioread32(&ctl->lut_error);
881 		dev_err(&sndev->stdev->dev,
882 			"Error setting up reserved lut window: %08x / %08x\n",
883 			bar_error, lut_error);
884 		return rc;
885 	}
886 
887 	return 0;
888 }
889 
890 static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
891 {
892 	int i;
893 	int cnt = 0;
894 
895 	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
896 		u32 r = ioread32(&ctrl->bar_entry[i].ctl);
897 
898 		if (r & NTB_CTRL_BAR_VALID)
899 			map[cnt++] = i;
900 	}
901 
902 	return cnt;
903 }
904 
905 static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
906 {
907 	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
908 				       sndev->mmio_self_ctrl);
909 
910 	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
911 	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
912 
913 	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
914 		sndev->nr_direct_mw, sndev->nr_lut_mw);
915 
916 	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
917 					    sndev->mmio_peer_ctrl);
918 
919 	sndev->peer_nr_lut_mw =
920 		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
921 	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
922 
923 	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
924 		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
925 
926 }
927 
928 /*
929  * There are 64 doorbells in the switch hardware, but they are
930  * shared among all partitions, so we must split them in half
931  * (32 for each partition). However, the message interrupts are
932  * also shared with the top 4 doorbells, so we limit this to
933  * 28 doorbells per partition.
934  */
935 static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
936 {
937 	sndev->db_valid_mask = 0x0FFFFFFF;
938 
939 	if (sndev->self_partition < sndev->peer_partition) {
940 		sndev->db_shift = 0;
941 		sndev->db_peer_shift = 32;
942 	} else {
943 		sndev->db_shift = 32;
944 		sndev->db_peer_shift = 0;
945 	}
946 
947 	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
948 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
949 	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
950 		  &sndev->mmio_self_dbmsg->odb_mask);
951 }
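/*
 * Added example of the resulting split: with db_shift == 0 and
 * db_peer_shift == 32, local doorbells occupy bits 27:0 of the 64-bit
 * doorbell register and the peer's doorbells occupy bits 59:32, leaving
 * the top four bits of each half for the message interrupts. Ringing
 * peer doorbell 0 therefore sets bit 32 of the odb register.
 */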
952 
953 static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
954 {
955 	int i;
956 	u32 msg_map = 0;
957 
958 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
959 		int m = i | sndev->peer_partition << 2;
960 
961 		msg_map |= m << i * 8;
962 	}
963 
964 	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
965 
966 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
967 		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
968 			  &sndev->mmio_self_dbmsg->imsg[i]);
969 }
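/*
 * Added example: assuming four incoming message registers and a peer
 * partition of 1, the loop above computes m = i | (1 << 2) for
 * i = 0..3 and packs one value per byte, giving msg_map = 0x07060504.
 * The map ties each local imsg slot to the corresponding message from
 * the peer partition; the value shown is illustrative only.
 */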
970 
971 static int switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
972 {
973 	int rc = 0;
974 	u16 req_id;
975 	u32 error;
976 
977 	req_id = ioread16(&sndev->mmio_ntb->requester_id);
978 
979 	if (ioread32(&sndev->mmio_self_ctrl->req_id_table_size) < 2) {
980 		dev_err(&sndev->stdev->dev,
981 			"Not enough requester IDs available\n");
982 		return -EFAULT;
983 	}
984 
985 	rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
986 				   NTB_CTRL_PART_OP_LOCK,
987 				   NTB_CTRL_PART_STATUS_LOCKED);
988 	if (rc)
989 		return rc;
990 
991 	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
992 		  &sndev->mmio_self_ctrl->partition_ctrl);
993 
994 	/*
995 	 * Root Complex Requester ID (which is 0:00.0)
996 	 */
997 	iowrite32(0 << 16 | NTB_CTRL_REQ_ID_EN,
998 		  &sndev->mmio_self_ctrl->req_id_table[0]);
999 
1000 	/*
1001 	 * Host Bridge Requester ID (as read from the mmap address)
1002 	 */
1003 	iowrite32(req_id << 16 | NTB_CTRL_REQ_ID_EN,
1004 		  &sndev->mmio_self_ctrl->req_id_table[1]);
1005 
1006 	rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
1007 				   NTB_CTRL_PART_OP_CFG,
1008 				   NTB_CTRL_PART_STATUS_NORMAL);
1009 	if (rc == -EIO) {
1010 		error = ioread32(&sndev->mmio_self_ctrl->req_id_error);
1011 		dev_err(&sndev->stdev->dev,
1012 			"Error setting up the requester ID table: %08x\n",
1013 			error);
1014 	}
1015 
1016 	return rc;
1017 }
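/*
 * Added example (assuming NTB_CTRL_REQ_ID_EN is bit 0): each
 * req_id_table entry carries the requester ID in its upper 16 bits with
 * the enable bit in the low bits, so a host bridge requester ID of
 * 0x0100 (bus 1, device 0, function 0) would be written as 0x01000001.
 * The value is shown for illustration only.
 */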
1018 
1019 static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
1020 {
1021 	int i;
1022 
1023 	memset(sndev->self_shared, 0, LUT_SIZE);
1024 	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
1025 	sndev->self_shared->partition_id = sndev->stdev->partition;
1026 
1027 	for (i = 0; i < sndev->nr_direct_mw; i++) {
1028 		int bar = sndev->direct_mw_to_bar[i];
1029 		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
1030 
1031 		if (i == 0)
1032 			sz = min_t(resource_size_t, sz,
1033 				   LUT_SIZE * sndev->nr_lut_mw);
1034 
1035 		sndev->self_shared->mw_sizes[i] = sz;
1036 	}
1037 
1038 	for (i = 0; i < sndev->nr_lut_mw; i++) {
1039 		int idx = sndev->nr_direct_mw + i;
1040 
1041 		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
1042 	}
1043 }
1044 
1045 static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
1046 {
1047 	int self_bar = sndev->direct_mw_to_bar[0];
1048 	int rc;
1049 
1050 	sndev->nr_rsvd_luts++;
1051 	sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
1052 						 LUT_SIZE,
1053 						 &sndev->self_shared_dma,
1054 						 GFP_KERNEL);
1055 	if (!sndev->self_shared) {
1056 		dev_err(&sndev->stdev->dev,
1057 			"unable to allocate memory for shared mw\n");
1058 		return -ENOMEM;
1059 	}
1060 
1061 	switchtec_ntb_init_shared(sndev);
1062 
1063 	rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
1064 				 sndev->self_partition,
1065 				 sndev->self_shared_dma);
1066 	if (rc)
1067 		goto unalloc_and_exit;
1068 
1069 	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
1070 	if (!sndev->peer_shared) {
1071 		rc = -ENOMEM;
1072 		goto unalloc_and_exit;
1073 	}
1074 
1075 	dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
1076 	return 0;
1077 
1078 unalloc_and_exit:
1079 	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1080 			  sndev->self_shared, sndev->self_shared_dma);
1081 
1082 	return rc;
1083 }
1084 
1085 static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
1086 {
1087 	if (sndev->peer_shared)
1088 		pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
1089 
1090 	if (sndev->self_shared)
1091 		dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1092 				  sndev->self_shared,
1093 				  sndev->self_shared_dma);
1094 	sndev->nr_rsvd_luts--;
1095 }
1096 
1097 static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
1098 {
1099 	struct switchtec_ntb *sndev = dev;
1100 
1101 	dev_dbg(&sndev->stdev->dev, "doorbell\n");
1102 
1103 	ntb_db_event(&sndev->ntb, 0);
1104 
1105 	return IRQ_HANDLED;
1106 }
1107 
1108 static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
1109 {
1110 	int i;
1111 	struct switchtec_ntb *sndev = dev;
1112 
1113 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1114 		u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
1115 
1116 		if (msg & NTB_DBMSG_IMSG_STATUS) {
1117 			dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
1118 				i, (u32)msg);
1119 			iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
1120 
1121 			if (i == LINK_MESSAGE)
1122 				switchtec_ntb_check_link(sndev);
1123 		}
1124 	}
1125 
1126 	return IRQ_HANDLED;
1127 }
1128 
1129 static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
1130 {
1131 	int i;
1132 	int rc;
1133 	int doorbell_irq = 0;
1134 	int message_irq = 0;
1135 	int event_irq;
1136 	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);
1137 
1138 	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);
1139 
1140 	while (doorbell_irq == event_irq)
1141 		doorbell_irq++;
1142 	while (message_irq == doorbell_irq ||
1143 	       message_irq == event_irq)
1144 		message_irq++;
1145 
1146 	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
1147 		event_irq, doorbell_irq, message_irq);
1148 
1149 	for (i = 0; i < idb_vecs - 4; i++)
1150 		iowrite8(doorbell_irq,
1151 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1152 
1153 	for (; i < idb_vecs; i++)
1154 		iowrite8(message_irq,
1155 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1156 
1157 	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
1158 	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);
1159 
1160 	rc = request_irq(sndev->doorbell_irq,
1161 			 switchtec_ntb_doorbell_isr, 0,
1162 			 "switchtec_ntb_doorbell", sndev);
1163 	if (rc)
1164 		return rc;
1165 
1166 	rc = request_irq(sndev->message_irq,
1167 			 switchtec_ntb_message_isr, 0,
1168 			 "switchtec_ntb_message", sndev);
1169 	if (rc) {
1170 		free_irq(sndev->doorbell_irq, sndev);
1171 		return rc;
1172 	}
1173 
1174 	return 0;
1175 }
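/*
 * Added note: the idb_vec_map programming above steers all but the last
 * four incoming doorbell/message bits to the doorbell vector and the
 * last four (the message registers) to the message vector, so the two
 * handlers are requested on distinct entries of the MSI/MSI-X
 * allocation owned by the switchtec core driver.
 */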
1176 
1177 static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
1178 {
1179 	free_irq(sndev->doorbell_irq, sndev);
1180 	free_irq(sndev->message_irq, sndev);
1181 }
1182 
1183 static int switchtec_ntb_add(struct device *dev,
1184 			     struct class_interface *class_intf)
1185 {
1186 	struct switchtec_dev *stdev = to_stdev(dev);
1187 	struct switchtec_ntb *sndev;
1188 	int rc;
1189 
1190 	stdev->sndev = NULL;
1191 
1192 	if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
1193 		return -ENODEV;
1194 
1195 	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
1196 	if (!sndev)
1197 		return -ENOMEM;
1198 
1199 	sndev->stdev = stdev;
1200 	rc = switchtec_ntb_init_sndev(sndev);
1201 	if (rc)
1202 		goto free_and_exit;
1203 
1204 	switchtec_ntb_init_mw(sndev);
1205 	switchtec_ntb_init_db(sndev);
1206 	switchtec_ntb_init_msgs(sndev);
1207 
1208 	rc = switchtec_ntb_init_req_id_table(sndev);
1209 	if (rc)
1210 		goto free_and_exit;
1211 
1212 	rc = switchtec_ntb_init_shared_mw(sndev);
1213 	if (rc)
1214 		goto free_and_exit;
1215 
1216 	rc = switchtec_ntb_init_db_msg_irq(sndev);
1217 	if (rc)
1218 		goto deinit_shared_and_exit;
1219 
1220 	rc = ntb_register_device(&sndev->ntb);
1221 	if (rc)
1222 		goto deinit_and_exit;
1223 
1224 	stdev->sndev = sndev;
1225 	stdev->link_notifier = switchtec_ntb_link_notification;
1226 	dev_info(dev, "NTB device registered\n");
1227 
1228 	return 0;
1229 
1230 deinit_and_exit:
1231 	switchtec_ntb_deinit_db_msg_irq(sndev);
1232 deinit_shared_and_exit:
1233 	switchtec_ntb_deinit_shared_mw(sndev);
1234 free_and_exit:
1235 	kfree(sndev);
1236 	dev_err(dev, "failed to register ntb device: %d\n", rc);
1237 	return rc;
1238 }
1239 
1240 static void switchtec_ntb_remove(struct device *dev,
1241 			  struct class_interface *class_intf)
1242 {
1243 	struct switchtec_dev *stdev = to_stdev(dev);
1244 	struct switchtec_ntb *sndev = stdev->sndev;
1245 
1246 	if (!sndev)
1247 		return;
1248 
1249 	stdev->link_notifier = NULL;
1250 	stdev->sndev = NULL;
1251 	ntb_unregister_device(&sndev->ntb);
1252 	switchtec_ntb_deinit_db_msg_irq(sndev);
1253 	switchtec_ntb_deinit_shared_mw(sndev);
1254 	kfree(sndev);
1255 	dev_info(dev, "ntb device unregistered\n");
1256 }
1257 
1258 static struct class_interface switchtec_interface  = {
1259 	.add_dev = switchtec_ntb_add,
1260 	.remove_dev = switchtec_ntb_remove,
1261 };
1262 
1263 static int __init switchtec_ntb_init(void)
1264 {
1265 	switchtec_interface.class = switchtec_class;
1266 	return class_interface_register(&switchtec_interface);
1267 }
1268 module_init(switchtec_ntb_init);
1269 
1270 static void __exit switchtec_ntb_exit(void)
1271 {
1272 	class_interface_unregister(&switchtec_interface);
1273 }
1274 module_exit(switchtec_ntb_exit);
1275