/*
 * Microsemi Switchtec(tm) PCIe NTB Driver
 * Copyright (c) 2017, Microsemi Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/switchtec.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ntb.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static ulong max_mw_size = SZ_2M;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size,
	"Max memory window size reported to the upper layer");

static bool use_lut_mws;
module_param(use_lut_mws, bool, 0644);
MODULE_PARM_DESC(use_lut_mws,
		 "Enable the use of the LUT based memory windows");

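/*
 * Fall back to two 32-bit accesses on platforms that do not provide
 * native 64-bit MMIO accessors (readq/writeq). Note that the split
 * access is not atomic; this is presumed acceptable for the registers
 * this driver touches with these helpers.
 */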
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif

#define SWITCHTEC_NTB_MAGIC 0x45CC0001
#define MAX_MWS     128

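/*
 * Layout of the small shared-memory region each side exposes to its peer
 * through a reserved LUT window: a magic/version word, the local
 * link-enable state, the local partition ID, the advertised size of each
 * memory window and the scratchpad array.
 */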
struct shared_mw {
	u32 magic;
	u32 link_sta;
	u32 partition_id;
	u64 mw_sizes[MAX_MWS];
	u32 spad[128];
};

#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
#define LUT_SIZE SZ_64K

struct switchtec_ntb {
	struct ntb_dev ntb;
	struct switchtec_dev *stdev;

	int self_partition;
	int peer_partition;

	int doorbell_irq;
	int message_irq;

	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;

	struct shared_mw *self_shared;
	struct shared_mw __iomem *peer_shared;
	dma_addr_t self_shared_dma;

	u64 db_mask;
	u64 db_valid_mask;
	int db_shift;
	int db_peer_shift;

	/* synchronize rmw access of db_mask and hw reg */
	spinlock_t db_mask_lock;

	int nr_direct_mw;
	int nr_lut_mw;
	int nr_rsvd_luts;
	int direct_mw_to_bar[MAX_DIRECT_MW];

	int peer_nr_direct_mw;
	int peer_nr_lut_mw;
	int peer_direct_mw_to_bar[MAX_DIRECT_MW];

	bool link_is_up;
	enum ntb_speed link_speed;
	enum ntb_width link_width;
};

static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
{
	return container_of(ntb, struct switchtec_ntb, ntb);
}

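/*
 * Perform a partition configuration operation (lock, configure or reset)
 * and wait for it to complete. The hardware reports a transient
 * "in progress" status for each operation, which is polled in 50ms steps
 * for up to ~50 seconds before declaring a timeout; an interrupted sleep
 * issues a reset operation and bails out.
 */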
static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
				 struct ntb_ctrl_regs __iomem *ctl,
				 u32 op, int wait_status)
{
	static const char * const op_text[] = {
		[NTB_CTRL_PART_OP_LOCK] = "lock",
		[NTB_CTRL_PART_OP_CFG] = "configure",
		[NTB_CTRL_PART_OP_RESET] = "reset",
	};

	int i;
	u32 ps;
	int status;

	switch (op) {
	case NTB_CTRL_PART_OP_LOCK:
		status = NTB_CTRL_PART_STATUS_LOCKING;
		break;
	case NTB_CTRL_PART_OP_CFG:
		status = NTB_CTRL_PART_STATUS_CONFIGURING;
		break;
	case NTB_CTRL_PART_OP_RESET:
		status = NTB_CTRL_PART_STATUS_RESETTING;
		break;
	default:
		return -EINVAL;
	}

	iowrite32(op, &ctl->partition_op);

	for (i = 0; i < 1000; i++) {
		if (msleep_interruptible(50) != 0) {
			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
			return -EINTR;
		}

		ps = ioread32(&ctl->partition_status) & 0xFFFF;

		if (ps != status)
			break;
	}

	if (ps == wait_status)
		return 0;

	if (ps == status) {
		dev_err(&sndev->stdev->dev,
			"Timed out while performing %s (%d). (%08x)\n",
			op_text[op], op,
			ioread32(&ctl->partition_status));

		return -ETIMEDOUT;
	}

	return -EIO;
}

static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
				  u32 val)
{
	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg))
		return -EINVAL;

	iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg);

	return 0;
}

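/*
 * Memory windows are numbered with the direct (BAR) windows first,
 * followed by the LUT windows. The first LUT entry (or entries) is
 * reserved for the internal shared-memory window and is therefore
 * subtracted from the count reported to clients.
 */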
static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	int nr_direct_mw = sndev->peer_nr_direct_mw;
	int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (!use_lut_mws)
		nr_lut_mw = 0;

	return nr_direct_mw + nr_lut_mw;
}

static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
}

static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
}

static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
				      int widx, resource_size_t *addr_align,
				      resource_size_t *size_align,
				      resource_size_t *size_max)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	int lut;
	resource_size_t size;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	lut = widx >= sndev->peer_nr_direct_mw;
	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);

	if (size == 0)
		return -EINVAL;

	if (addr_align)
		*addr_align = lut ? size : SZ_4K;

	if (size_align)
		*size_align = lut ? size : SZ_4K;

	if (size_max)
		*size_max = size;

	return 0;
}

static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int bar = sndev->peer_direct_mw_to_bar[idx];
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(0, &ctl->bar_entry[bar].win_size);
	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
}

static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}

static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
					dma_addr_t addr, resource_size_t size)
{
	int xlate_pos = ilog2(size);
	int bar = sndev->peer_direct_mw_to_bar[idx];
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
	iowrite64(sndev->self_partition | addr,
		  &ctl->bar_entry[bar].xlate_addr);
}

static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
				     dma_addr_t addr, resource_size_t size)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}

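/*
 * Set up a translation for an inbound memory window: lock the peer
 * partition configuration, program either a direct BAR window or a LUT
 * entry, then issue the configure operation and check for hardware
 * errors. Clients use this through the generic NTB API, e.g. roughly
 * (illustrative only):
 *
 *	buf = dma_alloc_coherent(&pdev->dev, size, &dma_addr, GFP_KERNEL);
 *	rc = ntb_mw_set_trans(ntb, NTB_DEF_PEER_IDX, widx, dma_addr, size);
 *
 * where size must be at least 4K for a direct window.
 */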
static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
				      dma_addr_t addr, resource_size_t size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int xlate_pos = ilog2(size);
	int nr_direct_mw = sndev->peer_nr_direct_mw;
	int rc;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
		widx, pidx, &addr, &size);

	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
		return -EINVAL;

	if (xlate_pos < 12)
		return -EINVAL;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	if (addr == 0 || size == 0) {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);
	} else {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
		else
			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);

	if (rc == -EIO) {
		dev_err(&sndev->stdev->dev,
			"Hardware reported an error configuring mw %d: %08x\n",
			widx, ioread32(&ctl->bar_error));

		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);

		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				      NTB_CTRL_PART_STATUS_NORMAL);
	}

	return rc;
}

static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;

	return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
}

static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
					 int idx, phys_addr_t *base,
					 resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[idx];
	size_t offset = 0;

	if (bar < 0)
		return -EINVAL;

	if (idx == 0) {
		/*
		 * This is the direct BAR shared with the LUTs
		 * which means the actual window will be offset
		 * by the size of all the LUT entries.
		 */

		offset = LUT_SIZE * sndev->nr_lut_mw;
	}

	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

	if (size) {
		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		if (offset && *size > offset)
			*size = offset;

		if (*size > max_mw_size)
			*size = max_mw_size;
	}

	return 0;
}

static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
				      int idx, phys_addr_t *base,
				      resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[0];
	int offset;

	offset = LUT_SIZE * lut_index(sndev, idx);

	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

	if (size)
		*size = LUT_SIZE;

	return 0;
}

static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
					  phys_addr_t *base,
					  resource_size_t *size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (idx < sndev->nr_direct_mw)
		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
	else if (idx < switchtec_ntb_peer_mw_count(ntb))
		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
	else
		return -EINVAL;
}

static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
					  int partition,
					  enum ntb_speed *speed,
					  enum ntb_width *width)
{
	struct switchtec_dev *stdev = sndev->stdev;

	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);

	if (speed)
		*speed = (linksta >> 16) & 0xF;

	if (width)
		*width = (linksta >> 20) & 0x3F;
}

static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
{
	enum ntb_speed self_speed, peer_speed;
	enum ntb_width self_width, peer_width;

	if (!sndev->link_is_up) {
		sndev->link_speed = NTB_SPEED_NONE;
		sndev->link_width = NTB_WIDTH_NONE;
		return;
	}

	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
				      &self_speed, &self_width);
	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
				      &peer_speed, &peer_width);

	sndev->link_speed = min(self_speed, peer_speed);
	sndev->link_width = min(self_width, peer_width);
}

enum {
	LINK_MESSAGE = 0,
	MSG_LINK_UP = 1,
	MSG_LINK_DOWN = 2,
	MSG_CHECK_LINK = 3,
};

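/*
 * Link management: each side publishes its link-enable state in its
 * shared-memory region, which the peer reads together with the magic
 * value in a single 64-bit access. The messages above are only used to
 * prompt the other side to re-check the link state.
 */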
static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
{
	int link_sta;
	int old = sndev->link_is_up;

	link_sta = sndev->self_shared->link_sta;
	if (link_sta) {
		u64 peer = ioread64(&sndev->peer_shared->magic);

		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
			link_sta = peer >> 32;
		else
			link_sta = 0;
	}

	sndev->link_is_up = link_sta;
	switchtec_ntb_set_link_speed(sndev);

	if (link_sta != old) {
		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
		ntb_link_event(&sndev->ntb);
		dev_info(&sndev->stdev->dev, "ntb link %s\n",
			 link_sta ? "up" : "down");
	}
}

static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
{
	struct switchtec_ntb *sndev = stdev->sndev;

	switchtec_ntb_check_link(sndev);
}

static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
				    enum ntb_speed *speed,
				    enum ntb_width *width)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (speed)
		*speed = sndev->link_speed;
	if (width)
		*width = sndev->link_width;

	return sndev->link_is_up;
}

static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
				     enum ntb_speed max_speed,
				     enum ntb_width max_width)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	dev_dbg(&sndev->stdev->dev, "enabling link\n");

	sndev->self_shared->link_sta = 1;
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);

	switchtec_ntb_check_link(sndev);

	return 0;
}

static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	dev_dbg(&sndev->stdev->dev, "disabling link\n");

	sndev->self_shared->link_sta = 0;
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);

	switchtec_ntb_check_link(sndev);

	return 0;
}

static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	return sndev->db_valid_mask;
}

static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
{
	return 1;
}

static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_vector < 0 || db_vector > 1)
		return 0;

	return sndev->db_valid_mask;
}

static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
{
	u64 ret;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;

	return ret & sndev->db_valid_mask;
}

static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);

	return 0;
}

static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

	sndev->db_mask |= db_bits << sndev->db_shift;
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

	return 0;
}

static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

	sndev->db_mask &= ~(db_bits << sndev->db_shift);
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

	return 0;
}

static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
}

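/*
 * Report the physical MMIO address (and access width) of the outbound
 * doorbell register, so that a client can ring the peer's doorbells with
 * a plain memory write (for example from a DMA engine) instead of
 * calling ntb_peer_db_set().
 */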
static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
				      phys_addr_t *db_addr,
				      resource_size_t *db_size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	unsigned long offset;

	offset = (unsigned long)&sndev->mmio_self_dbmsg->odb -
		(unsigned long)sndev->stdev->mmio;

	offset += sndev->db_shift / 8;

	if (db_addr)
		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
	if (db_size)
		*db_size = sizeof(u32);

	return 0;
}

static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	iowrite64(db_bits << sndev->db_peer_shift,
		  &sndev->mmio_self_dbmsg->odb);

	return 0;
}

static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	return ARRAY_SIZE(sndev->self_shared->spad);
}

static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
		return 0;

	if (!sndev->self_shared)
		return 0;

	return sndev->self_shared->spad[idx];
}

static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
		return -EINVAL;

	if (!sndev->self_shared)
		return -EIO;

	sndev->self_shared->spad[idx] = val;

	return 0;
}

static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
					int sidx)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
		return 0;

	if (!sndev->peer_shared)
		return 0;

	return ioread32(&sndev->peer_shared->spad[sidx]);
}

static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
					 int sidx, u32 val)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
		return -EINVAL;

	if (!sndev->peer_shared)
		return -EIO;

	iowrite32(val, &sndev->peer_shared->spad[sidx]);

	return 0;
}

static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
					int sidx, phys_addr_t *spad_addr)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	unsigned long offset;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
		(unsigned long)sndev->stdev->mmio;

	if (spad_addr)
		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;

	return 0;
}

static const struct ntb_dev_ops switchtec_ntb_ops = {
	.mw_count		= switchtec_ntb_mw_count,
	.mw_get_align		= switchtec_ntb_mw_get_align,
	.mw_set_trans		= switchtec_ntb_mw_set_trans,
	.peer_mw_count		= switchtec_ntb_peer_mw_count,
	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
	.link_is_up		= switchtec_ntb_link_is_up,
	.link_enable		= switchtec_ntb_link_enable,
	.link_disable		= switchtec_ntb_link_disable,
	.db_valid_mask		= switchtec_ntb_db_valid_mask,
	.db_vector_count	= switchtec_ntb_db_vector_count,
	.db_vector_mask		= switchtec_ntb_db_vector_mask,
	.db_read		= switchtec_ntb_db_read,
	.db_clear		= switchtec_ntb_db_clear,
	.db_set_mask		= switchtec_ntb_db_set_mask,
	.db_clear_mask		= switchtec_ntb_db_clear_mask,
	.db_read_mask		= switchtec_ntb_db_read_mask,
	.peer_db_addr		= switchtec_ntb_peer_db_addr,
	.peer_db_set		= switchtec_ntb_peer_db_set,
	.spad_count		= switchtec_ntb_spad_count,
	.spad_read		= switchtec_ntb_spad_read,
	.spad_write		= switchtec_ntb_spad_write,
	.peer_spad_read		= switchtec_ntb_peer_spad_read,
	.peer_spad_write	= switchtec_ntb_peer_spad_write,
	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
};

static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
{
	u64 tpart_vec;
	int self;
	u64 part_map;
	int bit;

	sndev->ntb.pdev = sndev->stdev->pdev;
	sndev->ntb.topo = NTB_TOPO_SWITCH;
	sndev->ntb.ops = &switchtec_ntb_ops;

	sndev->self_partition = sndev->stdev->partition;

	sndev->mmio_ntb = sndev->stdev->mmio_ntb;

	self = sndev->self_partition;
	tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
	tpart_vec <<= 32;
	tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);

	part_map = ioread64(&sndev->mmio_ntb->ep_map);
	part_map &= ~(1ULL << sndev->self_partition);

	if (!ffs(tpart_vec)) {
		if (sndev->stdev->partition_count != 2) {
			dev_err(&sndev->stdev->dev,
				"ntb target partition not defined\n");
			return -ENODEV;
		}

		bit = ffs(part_map);
		if (!bit) {
			dev_err(&sndev->stdev->dev,
				"peer partition is not NT partition\n");
			return -ENODEV;
		}

		sndev->peer_partition = bit - 1;
	} else {
		if (ffs(tpart_vec) != fls(tpart_vec)) {
			dev_err(&sndev->stdev->dev,
				"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
			return -ENODEV;
		}

		sndev->peer_partition = ffs(tpart_vec) - 1;
		if (!(part_map & (1ULL << sndev->peer_partition))) {
			dev_err(&sndev->stdev->dev,
				"ntb target partition is not NT partition\n");
			return -ENODEV;
		}
	}

	dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
		sndev->self_partition, sndev->stdev->partition_count);

	sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
		SWITCHTEC_NTB_REG_CTRL_OFFSET;
	sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET;

	sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
	sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
	sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];

	return 0;
}

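/*
 * Program a reserved LUT entry in the given partition's control registers
 * so that the corresponding 64K slot of the shared BAR maps the supplied
 * DMA address. This is how each side makes its shared-memory region
 * visible to its peer.
 */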
static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
			       struct ntb_ctrl_regs __iomem *ctl,
			       int lut_idx, int partition,
			       dma_addr_t addr)
{
	int peer_bar = sndev->peer_direct_mw_to_bar[0];
	u32 ctl_val;
	int rc;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
	ctl_val &= 0xFF;
	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
	ctl_val |= ilog2(LUT_SIZE) << 8;
	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
	iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);

	iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
		  &ctl->lut_entry[lut_idx]);

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up reserved lut window: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}

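/*
 * Populate the requester-ID translation table. Every requester ID that
 * may originate DMA through the NTB (here the root complex and the host
 * bridge) needs an entry so the hardware can translate it to a proxy ID
 * on the other side of the bridge.
 */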
static int config_req_id_table(struct switchtec_ntb *sndev,
			       struct ntb_ctrl_regs __iomem *mmio_ctrl,
			       int *req_ids, int count)
{
	int i, rc = 0;
	u32 error;
	u32 proxy_id;

	if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
		dev_err(&sndev->stdev->dev,
			"Not enough requester IDs available.\n");
		return -EFAULT;
	}

	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
				   NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
		  &mmio_ctrl->partition_ctrl);

	for (i = 0; i < count; i++) {
		iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
			  &mmio_ctrl->req_id_table[i]);

		proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
		dev_dbg(&sndev->stdev->dev,
			"Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
			req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
			req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
			(proxy_id >> 1) & 0x7);
	}

	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
				   NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);

	if (rc == -EIO) {
		error = ioread32(&mmio_ctrl->req_id_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up the requester ID table: %08x\n",
			error);
	}

	return 0;
}

static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
{
	int i;
	int cnt = 0;

	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
		u32 r = ioread32(&ctrl->bar_entry[i].ctl);

		if (r & NTB_CTRL_BAR_VALID)
			map[cnt++] = i;
	}

	return cnt;
}

static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
{
	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
				       sndev->mmio_self_ctrl);

	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
		sndev->nr_direct_mw, sndev->nr_lut_mw);

	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
					    sndev->mmio_peer_ctrl);

	sndev->peer_nr_lut_mw =
		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
}

/*
 * There are 64 doorbells in the switch hardware, but they are shared
 * among all partitions, so we split them in half (32 per partition).
 * The message interrupts also share the top four doorbells, so we
 * limit ourselves to 28 doorbells per partition.
 */
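/*
 * With that restriction, the 64-bit doorbell register ends up split as
 * follows (derived from the code below): bits 0-27 belong to the
 * partition using db_shift = 0 and bits 32-59 to the partition using
 * db_shift = 32; the remaining bits are not exposed as doorbells.
 */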
static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
{
	sndev->db_valid_mask = 0x0FFFFFFF;

	if (sndev->self_partition < sndev->peer_partition) {
		sndev->db_shift = 0;
		sndev->db_peer_shift = 32;
	} else {
		sndev->db_shift = 32;
		sndev->db_peer_shift = 0;
	}

	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_self_dbmsg->odb_mask);
}

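/*
 * Route each of our incoming message registers to the peer partition via
 * the message map, then initialize every incoming message register by
 * writing its status and mask bits (the status bit appears to be
 * write-one-to-clear, as in the message ISR below).
 */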
static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
{
	int i;
	u32 msg_map = 0;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
		int m = i | sndev->peer_partition << 2;

		msg_map |= m << i * 8;
	}

	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
			  &sndev->mmio_self_dbmsg->imsg[i]);
}

static int
switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
{
	int req_ids[2];

	/*
	 * Root Complex Requester ID (which is 0:00.0)
	 */
	req_ids[0] = 0;

	/*
	 * Host Bridge Requester ID (as read from the mmap address)
	 */
	req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);

	return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
				   ARRAY_SIZE(req_ids));
}

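/*
 * Populate the local shared-memory region with the magic value, our
 * partition ID and the size of every memory window we expose, so the
 * peer can report sensible alignment and size limits for each window.
 */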
static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
{
	int i;

	memset(sndev->self_shared, 0, LUT_SIZE);
	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
	sndev->self_shared->partition_id = sndev->stdev->partition;

	for (i = 0; i < sndev->nr_direct_mw; i++) {
		int bar = sndev->direct_mw_to_bar[i];
		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);

		if (i == 0)
			sz = min_t(resource_size_t, sz,
				   LUT_SIZE * sndev->nr_lut_mw);

		sndev->self_shared->mw_sizes[i] = sz;
	}

	for (i = 0; i < sndev->nr_lut_mw; i++) {
		int idx = sndev->nr_direct_mw + i;

		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
	}
}

static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
{
	int self_bar = sndev->direct_mw_to_bar[0];
	int rc;

	sndev->nr_rsvd_luts++;
	sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
						 LUT_SIZE,
						 &sndev->self_shared_dma,
						 GFP_KERNEL);
	if (!sndev->self_shared) {
		dev_err(&sndev->stdev->dev,
			"unable to allocate memory for shared mw\n");
		return -ENOMEM;
	}

	switchtec_ntb_init_shared(sndev);

	rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
				 sndev->self_partition,
				 sndev->self_shared_dma);
	if (rc)
		goto unalloc_and_exit;

	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
	if (!sndev->peer_shared) {
		rc = -ENOMEM;
		goto unalloc_and_exit;
	}

	dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
	return 0;

unalloc_and_exit:
	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
			  sndev->self_shared, sndev->self_shared_dma);

	return rc;
}

static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
{
	if (sndev->peer_shared)
		pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);

	if (sndev->self_shared)
		dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
				  sndev->self_shared,
				  sndev->self_shared_dma);
	sndev->nr_rsvd_luts--;
}

static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
{
	struct switchtec_ntb *sndev = dev;

	dev_dbg(&sndev->stdev->dev, "doorbell\n");

	ntb_db_event(&sndev->ntb, 0);

	return IRQ_HANDLED;
}

static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
{
	int i;
	struct switchtec_ntb *sndev = dev;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
		u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);

		if (msg & NTB_DBMSG_IMSG_STATUS) {
			dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
				i, (u32)msg);
			iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);

			if (i == LINK_MESSAGE)
				switchtec_ntb_check_link(sndev);
		}
	}

	return IRQ_HANDLED;
}

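/*
 * Pick interrupt vectors for doorbells and messages: the event vector is
 * dictated by the hardware, so choose the lowest vector numbers that do
 * not collide with it, point all but the last four entries of the
 * incoming doorbell vector map at the doorbell interrupt and the last
 * four at the message interrupt, then request both IRQs.
 */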
static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
{
	int i;
	int rc;
	int doorbell_irq = 0;
	int message_irq = 0;
	int event_irq;
	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);

	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);

	while (doorbell_irq == event_irq)
		doorbell_irq++;
	while (message_irq == doorbell_irq ||
	       message_irq == event_irq)
		message_irq++;

	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
		event_irq, doorbell_irq, message_irq);

	for (i = 0; i < idb_vecs - 4; i++)
		iowrite8(doorbell_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	for (; i < idb_vecs; i++)
		iowrite8(message_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);

	rc = request_irq(sndev->doorbell_irq,
			 switchtec_ntb_doorbell_isr, 0,
			 "switchtec_ntb_doorbell", sndev);
	if (rc)
		return rc;

	rc = request_irq(sndev->message_irq,
			 switchtec_ntb_message_isr, 0,
			 "switchtec_ntb_message", sndev);
	if (rc) {
		free_irq(sndev->doorbell_irq, sndev);
		return rc;
	}

	return 0;
}

static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
{
	free_irq(sndev->doorbell_irq, sndev);
	free_irq(sndev->message_irq, sndev);
}

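/*
 * Class-interface add callback, called for every registered switchtec
 * device. Only devices that advertise the NTB class code get an NTB
 * device; for those we discover the partitions and memory windows, set
 * up doorbells, messages, the requester-ID table, the shared-memory
 * window and the interrupts, then register with the NTB core.
 */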
static int switchtec_ntb_add(struct device *dev,
			     struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev;
	int rc;

	stdev->sndev = NULL;

	if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
		return -ENODEV;

	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
	if (!sndev)
		return -ENOMEM;

	sndev->stdev = stdev;
	rc = switchtec_ntb_init_sndev(sndev);
	if (rc)
		goto free_and_exit;

	switchtec_ntb_init_mw(sndev);
	switchtec_ntb_init_db(sndev);
	switchtec_ntb_init_msgs(sndev);

	rc = switchtec_ntb_init_req_id_table(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_shared_mw(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_db_msg_irq(sndev);
	if (rc)
		goto deinit_shared_and_exit;

	rc = ntb_register_device(&sndev->ntb);
	if (rc)
		goto deinit_and_exit;

	stdev->sndev = sndev;
	stdev->link_notifier = switchtec_ntb_link_notification;
	dev_info(dev, "NTB device registered\n");

	return 0;

deinit_and_exit:
	switchtec_ntb_deinit_db_msg_irq(sndev);
deinit_shared_and_exit:
	switchtec_ntb_deinit_shared_mw(sndev);
free_and_exit:
	kfree(sndev);
	dev_err(dev, "failed to register ntb device: %d\n", rc);
	return rc;
}

static void switchtec_ntb_remove(struct device *dev,
				 struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev = stdev->sndev;

	if (!sndev)
		return;

	stdev->link_notifier = NULL;
	stdev->sndev = NULL;
	ntb_unregister_device(&sndev->ntb);
	switchtec_ntb_deinit_db_msg_irq(sndev);
	switchtec_ntb_deinit_shared_mw(sndev);
	kfree(sndev);
	dev_info(dev, "ntb device unregistered\n");
}

static struct class_interface switchtec_interface = {
	.add_dev = switchtec_ntb_add,
	.remove_dev = switchtec_ntb_remove,
};

static int __init switchtec_ntb_init(void)
{
	switchtec_interface.class = switchtec_class;
	return class_interface_register(&switchtec_interface);
}
module_init(switchtec_ntb_init);

static void __exit switchtec_ntb_exit(void)
{
	class_interface_unregister(&switchtec_interface);
}
module_exit(switchtec_ntb_exit);
1295