xref: /linux/drivers/ntb/hw/mscc/ntb_hw_switchtec.c (revision 3df54c870f52b4c47b53eead8d22a109f741b91c)
1 /*
2  * Microsemi Switchtec(tm) PCIe Management Driver
3  * Copyright (c) 2017, Microsemi Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  */
15 
16 #include <linux/switchtec.h>
17 #include <linux/module.h>
18 #include <linux/delay.h>
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21 #include <linux/ntb.h>
22 
23 MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
24 MODULE_VERSION("0.1");
25 MODULE_LICENSE("GPL");
26 MODULE_AUTHOR("Microsemi Corporation");
27 
28 static ulong max_mw_size = SZ_2M;
29 module_param(max_mw_size, ulong, 0644);
30 MODULE_PARM_DESC(max_mw_size,
31 	"Max memory window size reported to the upper layer");
32 
33 static bool use_lut_mws;
34 module_param(use_lut_mws, bool, 0644);
35 MODULE_PARM_DESC(use_lut_mws,
36 		 "Enable the use of the LUT based memory windows");
37 
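/*
 * Fallback 64-bit MMIO accessors for architectures that do not provide
 * readq/writeq.  The two 32-bit halves are not read or written
 * atomically, which the callers below are assumed to tolerate.
 */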
38 #ifndef ioread64
39 #ifdef readq
40 #define ioread64 readq
41 #else
42 #define ioread64 _ioread64
43 static inline u64 _ioread64(void __iomem *mmio)
44 {
45 	u64 low, high;
46 
47 	low = ioread32(mmio);
48 	high = ioread32(mmio + sizeof(u32));
49 	return low | (high << 32);
50 }
51 #endif
52 #endif
53 
54 #ifndef iowrite64
55 #ifdef writeq
56 #define iowrite64 writeq
57 #else
58 #define iowrite64 _iowrite64
59 static inline void _iowrite64(u64 val, void __iomem *mmio)
60 {
61 	iowrite32(val, mmio);
62 	iowrite32(val >> 32, mmio + sizeof(u32));
63 }
64 #endif
65 #endif
66 
67 #define SWITCHTEC_NTB_MAGIC 0x45CC0001
68 #define MAX_MWS     128
69 
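/*
 * Layout of the shared memory window that each side publishes to its
 * peer through LUT entry 0.  The first two fields are kept adjacent:
 * switchtec_ntb_check_link() reads them with a single 64-bit load of
 * @magic, taking the magic value from the low 32 bits and the peer's
 * link_sta from the high 32 bits.
 */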
70 struct shared_mw {
71 	u32 magic;
72 	u32 link_sta;
73 	u32 partition_id;
74 	u64 mw_sizes[MAX_MWS];
75 	u32 spad[128];
76 };
77 
78 #define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
79 #define LUT_SIZE SZ_64K
80 
81 struct switchtec_ntb {
82 	struct ntb_dev ntb;
83 	struct switchtec_dev *stdev;
84 
85 	int self_partition;
86 	int peer_partition;
87 
88 	int doorbell_irq;
89 	int message_irq;
90 
91 	struct ntb_info_regs __iomem *mmio_ntb;
92 	struct ntb_ctrl_regs __iomem *mmio_ctrl;
93 	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
94 	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
95 	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
96 	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
97 
98 	struct shared_mw *self_shared;
99 	struct shared_mw __iomem *peer_shared;
100 	dma_addr_t self_shared_dma;
101 
102 	u64 db_mask;
103 	u64 db_valid_mask;
104 	int db_shift;
105 	int db_peer_shift;
106 
107 	/* synchronize rmw access of db_mask and hw reg */
108 	spinlock_t db_mask_lock;
109 
110 	int nr_direct_mw;
111 	int nr_lut_mw;
112 	int direct_mw_to_bar[MAX_DIRECT_MW];
113 
114 	int peer_nr_direct_mw;
115 	int peer_nr_lut_mw;
116 	int peer_direct_mw_to_bar[MAX_DIRECT_MW];
117 
118 	bool link_is_up;
119 	enum ntb_speed link_speed;
120 	enum ntb_width link_width;
121 };
122 
123 static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
124 {
125 	return container_of(ntb, struct switchtec_ntb, ntb);
126 }
127 
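/*
 * Issue a partition controller operation (lock, configure or reset) and
 * poll the status register until it leaves the corresponding transient
 * state, giving up after roughly 50 seconds (1000 polls of 50ms).  An
 * interrupted sleep issues a reset operation and returns -EINTR.
 * Returns 0 if the final status matches @wait_status, -ETIMEDOUT if the
 * transient state never cleared, and -EIO for any other final status.
 */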
128 static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
129 				 struct ntb_ctrl_regs __iomem *ctl,
130 				 u32 op, int wait_status)
131 {
132 	static const char * const op_text[] = {
133 		[NTB_CTRL_PART_OP_LOCK] = "lock",
134 		[NTB_CTRL_PART_OP_CFG] = "configure",
135 		[NTB_CTRL_PART_OP_RESET] = "reset",
136 	};
137 
138 	int i;
139 	u32 ps;
140 	int status;
141 
142 	switch (op) {
143 	case NTB_CTRL_PART_OP_LOCK:
144 		status = NTB_CTRL_PART_STATUS_LOCKING;
145 		break;
146 	case NTB_CTRL_PART_OP_CFG:
147 		status = NTB_CTRL_PART_STATUS_CONFIGURING;
148 		break;
149 	case NTB_CTRL_PART_OP_RESET:
150 		status = NTB_CTRL_PART_STATUS_RESETTING;
151 		break;
152 	default:
153 		return -EINVAL;
154 	}
155 
156 	iowrite32(op, &ctl->partition_op);
157 
158 	for (i = 0; i < 1000; i++) {
159 		if (msleep_interruptible(50) != 0) {
160 			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
161 			return -EINTR;
162 		}
163 
164 		ps = ioread32(&ctl->partition_status) & 0xFFFF;
165 
166 		if (ps != status)
167 			break;
168 	}
169 
170 	if (ps == wait_status)
171 		return 0;
172 
173 	if (ps == status) {
174 		dev_err(&sndev->stdev->dev,
175 			"Timed out while performing %s (%d). (%08x)\n",
176 			op_text[op], op,
177 			ioread32(&ctl->partition_status));
178 
179 		return -ETIMEDOUT;
180 	}
181 
182 	return -EIO;
183 }
184 
185 static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
186 				  u32 val)
187 {
188 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg))
189 		return -EINVAL;
190 
191 	iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg);
192 
193 	return 0;
194 }
195 
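/*
 * Number of inbound memory windows the peer can use to reach local
 * memory: all of the peer's direct (BAR) windows plus, when use_lut_mws
 * is set, its LUT windows less the one reserved for the shared-state
 * window set up in switchtec_ntb_init_shared_mw().
 */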
196 static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
197 {
198 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
199 	int nr_direct_mw = sndev->peer_nr_direct_mw;
200 	int nr_lut_mw = sndev->peer_nr_lut_mw - 1;
201 
202 	if (pidx != NTB_DEF_PEER_IDX)
203 		return -EINVAL;
204 
205 	if (!use_lut_mws)
206 		nr_lut_mw = 0;
207 
208 	return nr_direct_mw + nr_lut_mw;
209 }
210 
211 static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
212 {
213 	return mw_idx - sndev->nr_direct_mw + 1;
214 }
215 
216 static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
217 {
218 	return mw_idx - sndev->peer_nr_direct_mw + 1;
219 }
220 
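/*
 * Direct windows only require 4K alignment, while a LUT window must be
 * aligned to (and is fixed at) its advertised size, which is read from
 * the size table the peer exported in its shared memory window.
 */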
221 static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
222 				      int widx, resource_size_t *addr_align,
223 				      resource_size_t *size_align,
224 				      resource_size_t *size_max)
225 {
226 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
227 	int lut;
228 	resource_size_t size;
229 
230 	if (pidx != NTB_DEF_PEER_IDX)
231 		return -EINVAL;
232 
233 	lut = widx >= sndev->peer_nr_direct_mw;
234 	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
235 
236 	if (size == 0)
237 		return -EINVAL;
238 
239 	if (addr_align)
240 		*addr_align = lut ? size : SZ_4K;
241 
242 	if (size_align)
243 		*size_align = lut ? size : SZ_4K;
244 
245 	if (size_max)
246 		*size_max = size;
247 
248 	return 0;
249 }
250 
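/*
 * Helpers to program or tear down the peer's translation entries.  The
 * hardware appears to encode the destination partition in the low bits
 * of the translation address, which is why the DMA address (required by
 * the caller to be at least 4K aligned) is simply ORed with
 * self_partition below.
 */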
251 static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
252 {
253 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
254 	int bar = sndev->peer_direct_mw_to_bar[idx];
255 	u32 ctl_val;
256 
257 	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
258 	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
259 	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
260 	iowrite32(0, &ctl->bar_entry[bar].win_size);
261 	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
262 }
263 
264 static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
265 {
266 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
267 
268 	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
269 }
270 
271 static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
272 					dma_addr_t addr, resource_size_t size)
273 {
274 	int xlate_pos = ilog2(size);
275 	int bar = sndev->peer_direct_mw_to_bar[idx];
276 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
277 	u32 ctl_val;
278 
279 	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
280 	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
281 
282 	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
283 	iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
284 	iowrite64(sndev->self_partition | addr,
285 		  &ctl->bar_entry[bar].xlate_addr);
286 }
287 
288 static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
289 				     dma_addr_t addr, resource_size_t size)
290 {
291 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
292 
293 	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
294 		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
295 }
296 
297 static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
298 				      dma_addr_t addr, resource_size_t size)
299 {
300 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
301 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
302 	int xlate_pos = ilog2(size);
303 	int nr_direct_mw = sndev->peer_nr_direct_mw;
304 	int rc;
305 
306 	if (pidx != NTB_DEF_PEER_IDX)
307 		return -EINVAL;
308 
309 	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
310 		widx, pidx, &addr, &size);
311 
312 	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
313 		return -EINVAL;
314 
315 	if (addr != 0 && size != 0 && xlate_pos < 12)
316 		return -EINVAL;
317 
318 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
319 				   NTB_CTRL_PART_STATUS_LOCKED);
320 	if (rc)
321 		return rc;
322 
323 	if (addr == 0 || size == 0) {
324 		if (widx < nr_direct_mw)
325 			switchtec_ntb_mw_clr_direct(sndev, widx);
326 		else
327 			switchtec_ntb_mw_clr_lut(sndev, widx);
328 	} else {
329 		if (widx < nr_direct_mw)
330 			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
331 		else
332 			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
333 	}
334 
335 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
336 				   NTB_CTRL_PART_STATUS_NORMAL);
337 
338 	if (rc == -EIO) {
339 		dev_err(&sndev->stdev->dev,
340 			"Hardware reported an error configuring mw %d: %08x\n",
341 			widx, ioread32(&ctl->bar_error));
342 
343 		if (widx < nr_direct_mw)
344 			switchtec_ntb_mw_clr_direct(sndev, widx);
345 		else
346 			switchtec_ntb_mw_clr_lut(sndev, widx);
347 
348 		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
349 				      NTB_CTRL_PART_STATUS_NORMAL);
350 	}
351 
352 	return rc;
353 }
354 
355 static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
356 {
357 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
358 
359 	return sndev->nr_direct_mw + (use_lut_mws ? sndev->nr_lut_mw - 1 : 0);
360 }
361 
362 static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
363 					 int idx, phys_addr_t *base,
364 					 resource_size_t *size)
365 {
366 	int bar = sndev->direct_mw_to_bar[idx];
367 	size_t offset = 0;
368 
369 	if (bar < 0)
370 		return -EINVAL;
371 
372 	if (idx == 0) {
373 		/*
374 		 * This is the direct BAR shared with the LUTs
375 		 * which means the actual window will be offset
376 		 * by the size of all the LUT entries.
377 		 */
378 
379 		offset = LUT_SIZE * sndev->nr_lut_mw;
380 	}
381 
382 	if (base)
383 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
384 
385 	if (size) {
386 		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
387 		if (offset && *size > offset)
388 			*size = offset;
389 
390 		if (*size > max_mw_size)
391 			*size = max_mw_size;
392 	}
393 
394 	return 0;
395 }
396 
397 static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
398 				      int idx, phys_addr_t *base,
399 				      resource_size_t *size)
400 {
401 	int bar = sndev->direct_mw_to_bar[0];
402 	int offset;
403 
404 	offset = LUT_SIZE * lut_index(sndev, idx);
405 
406 	if (base)
407 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
408 
409 	if (size)
410 		*size = LUT_SIZE;
411 
412 	return 0;
413 }
414 
415 static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
416 					  phys_addr_t *base,
417 					  resource_size_t *size)
418 {
419 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
420 
421 	if (idx < sndev->nr_direct_mw)
422 		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
423 	else if (idx < switchtec_ntb_peer_mw_count(ntb))
424 		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
425 	else
426 		return -EINVAL;
427 }
428 
429 static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
430 					  int partition,
431 					  enum ntb_speed *speed,
432 					  enum ntb_width *width)
433 {
434 	struct switchtec_dev *stdev = sndev->stdev;
435 
436 	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
437 	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
438 
439 	if (speed)
440 		*speed = (linksta >> 16) & 0xF;
441 
442 	if (width)
443 		*width = (linksta >> 20) & 0x3F;
444 }
445 
446 static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
447 {
448 	enum ntb_speed self_speed, peer_speed;
449 	enum ntb_width self_width, peer_width;
450 
451 	if (!sndev->link_is_up) {
452 		sndev->link_speed = NTB_SPEED_NONE;
453 		sndev->link_width = NTB_WIDTH_NONE;
454 		return;
455 	}
456 
457 	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
458 				      &self_speed, &self_width);
459 	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
460 				      &peer_speed, &peer_width);
461 
462 	sndev->link_speed = min(self_speed, peer_speed);
463 	sndev->link_width = min(self_width, peer_width);
464 }
465 
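/*
 * Message index 0 is reserved for link notifications.  The value sent
 * is advisory only: on receipt the message ISR simply calls
 * switchtec_ntb_check_link(), which derives the real link state from
 * the peer's shared memory window.
 */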
466 enum {
467 	LINK_MESSAGE = 0,
468 	MSG_LINK_UP = 1,
469 	MSG_LINK_DOWN = 2,
470 	MSG_CHECK_LINK = 3,
471 };
472 
473 static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
474 {
475 	int link_sta;
476 	int old = sndev->link_is_up;
477 
478 	link_sta = sndev->self_shared->link_sta;
479 	if (link_sta) {
480 		u64 peer = ioread64(&sndev->peer_shared->magic);
481 
482 		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
483 			link_sta = peer >> 32;
484 		else
485 			link_sta = 0;
486 	}
487 
488 	sndev->link_is_up = link_sta;
489 	switchtec_ntb_set_link_speed(sndev);
490 
491 	if (link_sta != old) {
492 		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
493 		ntb_link_event(&sndev->ntb);
494 		dev_info(&sndev->stdev->dev, "ntb link %s\n",
495 			 link_sta ? "up" : "down");
496 	}
497 }
498 
499 static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
500 {
501 	struct switchtec_ntb *sndev = stdev->sndev;
502 
503 	switchtec_ntb_check_link(sndev);
504 }
505 
506 static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
507 				    enum ntb_speed *speed,
508 				    enum ntb_width *width)
509 {
510 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
511 
512 	if (speed)
513 		*speed = sndev->link_speed;
514 	if (width)
515 		*width = sndev->link_width;
516 
517 	return sndev->link_is_up;
518 }
519 
520 static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
521 				     enum ntb_speed max_speed,
522 				     enum ntb_width max_width)
523 {
524 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
525 
526 	dev_dbg(&sndev->stdev->dev, "enabling link\n");
527 
528 	sndev->self_shared->link_sta = 1;
529 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
530 
531 	switchtec_ntb_check_link(sndev);
532 
533 	return 0;
534 }
535 
536 static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
537 {
538 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
539 
540 	dev_dbg(&sndev->stdev->dev, "disabling link\n");
541 
542 	sndev->self_shared->link_sta = 0;
543 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);
544 
545 	switchtec_ntb_check_link(sndev);
546 
547 	return 0;
548 }
549 
550 static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
551 {
552 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
553 
554 	return sndev->db_valid_mask;
555 }
556 
557 static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
558 {
559 	return 1;
560 }
561 
562 static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
563 {
564 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
565 
566 	if (db_vector < 0 || db_vector > 1)
567 		return 0;
568 
569 	return sndev->db_valid_mask;
570 }
571 
572 static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
573 {
574 	u64 ret;
575 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
576 
577 	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
578 
579 	return ret & sndev->db_valid_mask;
580 }
581 
582 static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
583 {
584 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
585 
586 	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
587 
588 	return 0;
589 }
590 
591 static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
592 {
593 	unsigned long irqflags;
594 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
595 
596 	if (db_bits & ~sndev->db_valid_mask)
597 		return -EINVAL;
598 
599 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
600 
601 	sndev->db_mask |= db_bits << sndev->db_shift;
602 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
603 
604 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
605 
606 	return 0;
607 }
608 
609 static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
610 {
611 	unsigned long irqflags;
612 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
613 
614 	if (db_bits & ~sndev->db_valid_mask)
615 		return -EINVAL;
616 
617 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
618 
619 	sndev->db_mask &= ~(db_bits << sndev->db_shift);
620 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
621 
622 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
623 
624 	return 0;
625 }
626 
627 static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
628 {
629 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
630 
631 	return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
632 }
633 
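/*
 * Report the physical address of the outgoing doorbell register,
 * computed as its offset from the start of the management BAR (BAR 0),
 * so a client can ring the peer's doorbells with a direct 32-bit write.
 */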
634 static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
635 				      phys_addr_t *db_addr,
636 				      resource_size_t *db_size)
637 {
638 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
639 	unsigned long offset;
640 
641 	offset = (unsigned long)sndev->mmio_self_dbmsg->odb -
642 		(unsigned long)sndev->stdev->mmio;
643 
644 	offset += sndev->db_shift / 8;
645 
646 	if (db_addr)
647 		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
648 	if (db_size)
649 		*db_size = sizeof(u32);
650 
651 	return 0;
652 }
653 
654 static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
655 {
656 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
657 
658 	iowrite64(db_bits << sndev->db_peer_shift,
659 		  &sndev->mmio_self_dbmsg->odb);
660 
661 	return 0;
662 }
663 
664 static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
665 {
666 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
667 
668 	return ARRAY_SIZE(sndev->self_shared->spad);
669 }
670 
671 static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
672 {
673 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
674 
675 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
676 		return 0;
677 
678 	if (!sndev->self_shared)
679 		return 0;
680 
681 	return sndev->self_shared->spad[idx];
682 }
683 
684 static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
685 {
686 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
687 
688 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
689 		return -EINVAL;
690 
691 	if (!sndev->self_shared)
692 		return -EIO;
693 
694 	sndev->self_shared->spad[idx] = val;
695 
696 	return 0;
697 }
698 
699 static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
700 					int sidx)
701 {
702 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
703 
704 	if (pidx != NTB_DEF_PEER_IDX)
705 		return -EINVAL;
706 
707 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
708 		return 0;
709 
710 	if (!sndev->peer_shared)
711 		return 0;
712 
713 	return ioread32(&sndev->peer_shared->spad[sidx]);
714 }
715 
716 static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
717 					 int sidx, u32 val)
718 {
719 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
720 
721 	if (pidx != NTB_DEF_PEER_IDX)
722 		return -EINVAL;
723 
724 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
725 		return -EINVAL;
726 
727 	if (!sndev->peer_shared)
728 		return -EIO;
729 
730 	iowrite32(val, &sndev->peer_shared->spad[sidx]);
731 
732 	return 0;
733 }
734 
735 static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
736 					int sidx, phys_addr_t *spad_addr)
737 {
738 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
739 	unsigned long offset;
740 
741 	if (pidx != NTB_DEF_PEER_IDX)
742 		return -EINVAL;
743 
744 	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
745 		(unsigned long)sndev->stdev->mmio;
746 
747 	if (spad_addr)
748 		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
749 
750 	return 0;
751 }
752 
753 static const struct ntb_dev_ops switchtec_ntb_ops = {
754 	.mw_count		= switchtec_ntb_mw_count,
755 	.mw_get_align		= switchtec_ntb_mw_get_align,
756 	.mw_set_trans		= switchtec_ntb_mw_set_trans,
757 	.peer_mw_count		= switchtec_ntb_peer_mw_count,
758 	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
759 	.link_is_up		= switchtec_ntb_link_is_up,
760 	.link_enable		= switchtec_ntb_link_enable,
761 	.link_disable		= switchtec_ntb_link_disable,
762 	.db_valid_mask		= switchtec_ntb_db_valid_mask,
763 	.db_vector_count	= switchtec_ntb_db_vector_count,
764 	.db_vector_mask		= switchtec_ntb_db_vector_mask,
765 	.db_read		= switchtec_ntb_db_read,
766 	.db_clear		= switchtec_ntb_db_clear,
767 	.db_set_mask		= switchtec_ntb_db_set_mask,
768 	.db_clear_mask		= switchtec_ntb_db_clear_mask,
769 	.db_read_mask		= switchtec_ntb_db_read_mask,
770 	.peer_db_addr		= switchtec_ntb_peer_db_addr,
771 	.peer_db_set		= switchtec_ntb_peer_db_set,
772 	.spad_count		= switchtec_ntb_spad_count,
773 	.spad_read		= switchtec_ntb_spad_read,
774 	.spad_write		= switchtec_ntb_spad_write,
775 	.peer_spad_read		= switchtec_ntb_peer_spad_read,
776 	.peer_spad_write	= switchtec_ntb_peer_spad_write,
777 	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
778 };
779 
780 static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
781 {
782 	u64 tpart_vec;
783 	int self;
784 	u64 part_map;
785 	int bit;
786 
787 	sndev->ntb.pdev = sndev->stdev->pdev;
788 	sndev->ntb.topo = NTB_TOPO_SWITCH;
789 	sndev->ntb.ops = &switchtec_ntb_ops;
790 
791 	sndev->self_partition = sndev->stdev->partition;
792 
793 	sndev->mmio_ntb = sndev->stdev->mmio_ntb;
794 
795 	self = sndev->self_partition;
796 	tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
797 	tpart_vec <<= 32;
798 	tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
799 
800 	part_map = ioread64(&sndev->mmio_ntb->ep_map);
801 	part_map &= ~(1ULL << sndev->self_partition);
802 
803 	if (!ffs(tpart_vec)) {
804 		if (sndev->stdev->partition_count != 2) {
805 			dev_err(&sndev->stdev->dev,
806 				"ntb target partition not defined\n");
807 			return -ENODEV;
808 		}
809 
810 		bit = ffs(part_map);
811 		if (!bit) {
812 			dev_err(&sndev->stdev->dev,
813 				"peer partition is not NT partition\n");
814 			return -ENODEV;
815 		}
816 
817 		sndev->peer_partition = bit - 1;
818 	} else {
819 		if (ffs(tpart_vec) != fls(tpart_vec)) {
820 			dev_err(&sndev->stdev->dev,
821 				"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
822 			return -ENODEV;
823 		}
824 
825 		sndev->peer_partition = ffs(tpart_vec) - 1;
826 		if (!(part_map & (1ULL << sndev->peer_partition))) {
827 			dev_err(&sndev->stdev->dev,
828 				"ntb target partition is not NT partition\n");
829 			return -ENODEV;
830 		}
831 	}
832 
833 	dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
834 		sndev->self_partition, sndev->stdev->partition_count);
835 
836 	sndev->mmio_ctrl = (void __iomem *)sndev->mmio_ntb +
837 		SWITCHTEC_NTB_REG_CTRL_OFFSET;
838 	sndev->mmio_dbmsg = (void __iomem *)sndev->mmio_ntb +
839 		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
840 
841 	sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
842 	sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
843 	sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
844 
845 	return 0;
846 }
847 
848 static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
849 {
850 	int i;
851 	int cnt = 0;
852 
853 	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
854 		u32 r = ioread32(&ctrl->bar_entry[i].ctl);
855 
856 		if (r & NTB_CTRL_BAR_VALID)
857 			map[cnt++] = i;
858 	}
859 
860 	return cnt;
861 }
862 
863 static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
864 {
865 	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
866 				       sndev->mmio_self_ctrl);
867 
868 	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
869 	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
870 
871 	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
872 		sndev->nr_direct_mw, sndev->nr_lut_mw);
873 
874 	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
875 					    sndev->mmio_peer_ctrl);
876 
877 	sndev->peer_nr_lut_mw =
878 		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
879 	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
880 
881 	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
882 		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
883 
884 }
885 
886 /*
887  * There are 64 doorbells in the switch hardware, but they are
888  * shared among all partitions, so we must split them in half
889  * (32 for each partition). However, the message interrupts are
890  * also shared with the top 4 doorbells, so we just limit this to
891  * 28 doorbells per partition.
892  */
893 static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
894 {
895 	sndev->db_valid_mask = 0x0FFFFFFF;
896 
897 	if (sndev->self_partition < sndev->peer_partition) {
898 		sndev->db_shift = 0;
899 		sndev->db_peer_shift = 32;
900 	} else {
901 		sndev->db_shift = 32;
902 		sndev->db_peer_shift = 0;
903 	}
904 
905 	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
906 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
907 	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
908 		  &sndev->mmio_self_dbmsg->odb_mask);
909 }
910 
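/*
 * Program the incoming message routing: each byte of msg_map describes
 * one incoming message slot, packing the message number in the low two
 * bits and the source partition above it, so slot i is fed by message i
 * from the peer partition (e.g. with four slots, msg_map ends up as
 * m3 << 24 | m2 << 16 | m1 << 8 | m0).  Each slot register is then
 * written with its status and mask bits to put it into a known state
 * before the interrupt handlers are installed.
 */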
911 static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
912 {
913 	int i;
914 	u32 msg_map = 0;
915 
916 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
917 		int m = i | sndev->peer_partition << 2;
918 
919 		msg_map |= m << i * 8;
920 	}
921 
922 	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
923 
924 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
925 		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
926 			  &sndev->mmio_self_dbmsg->imsg[i]);
927 }
928 
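/*
 * Populate the first two requester ID table entries: the root complex
 * (requester ID 0:00.0) and the requester ID reported in the NTB info
 * registers, both enabled with NTB_CTRL_REQ_ID_EN.  The update is
 * wrapped in the same partition lock/configure sequence used for
 * memory window setup.
 */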
929 static int switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
930 {
931 	int rc = 0;
932 	u16 req_id;
933 	u32 error;
934 
935 	req_id = ioread16(&sndev->mmio_ntb->requester_id);
936 
937 	if (ioread32(&sndev->mmio_self_ctrl->req_id_table_size) < 2) {
938 		dev_err(&sndev->stdev->dev,
939 			"Not enough requester IDs available\n");
940 		return -EFAULT;
941 	}
942 
943 	rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
944 				   NTB_CTRL_PART_OP_LOCK,
945 				   NTB_CTRL_PART_STATUS_LOCKED);
946 	if (rc)
947 		return rc;
948 
949 	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
950 		  &sndev->mmio_self_ctrl->partition_ctrl);
951 
952 	/*
953 	 * Root Complex Requester ID (which is 0:00.0)
954 	 */
955 	iowrite32(0 << 16 | NTB_CTRL_REQ_ID_EN,
956 		  &sndev->mmio_self_ctrl->req_id_table[0]);
957 
958 	/*
959 	 * Host Bridge Requester ID (as read from the mmap address)
960 	 */
961 	iowrite32(req_id << 16 | NTB_CTRL_REQ_ID_EN,
962 		  &sndev->mmio_self_ctrl->req_id_table[1]);
963 
964 	rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
965 				   NTB_CTRL_PART_OP_CFG,
966 				   NTB_CTRL_PART_STATUS_NORMAL);
967 	if (rc == -EIO) {
968 		error = ioread32(&sndev->mmio_self_ctrl->req_id_error);
969 		dev_err(&sndev->stdev->dev,
970 			"Error setting up the requester ID table: %08x\n",
971 			error);
972 	}
973 
974 	return rc;
975 }
976 
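/*
 * Fill in the local copy of the shared-state window: the magic value,
 * our partition ID and the size of each memory window we export, so
 * that the peer's switchtec_ntb_mw_get_align() can report alignment and
 * size limits.  Window 0, which shares its BAR with the LUT entries, is
 * clamped to LUT_SIZE * nr_lut_mw.
 */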
977 static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
978 {
979 	int i;
980 
981 	memset(sndev->self_shared, 0, LUT_SIZE);
982 	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
983 	sndev->self_shared->partition_id = sndev->stdev->partition;
984 
985 	for (i = 0; i < sndev->nr_direct_mw; i++) {
986 		int bar = sndev->direct_mw_to_bar[i];
987 		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
988 
989 		if (i == 0)
990 			sz = min_t(resource_size_t, sz,
991 				   LUT_SIZE * sndev->nr_lut_mw);
992 
993 		sndev->self_shared->mw_sizes[i] = sz;
994 	}
995 
996 	for (i = 0; i < sndev->nr_lut_mw; i++) {
997 		int idx = sndev->nr_direct_mw + i;
998 
999 		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
1000 	}
1001 }
1002 
1003 static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
1004 {
1005 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
1006 	int self_bar = sndev->direct_mw_to_bar[0];
1007 	int peer_bar = sndev->peer_direct_mw_to_bar[0];
1008 	u32 ctl_val;
1009 	int rc;
1010 
1011 	sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
1012 						 LUT_SIZE,
1013 						 &sndev->self_shared_dma,
1014 						 GFP_KERNEL);
1015 	if (!sndev->self_shared) {
1016 		dev_err(&sndev->stdev->dev,
1017 			"unable to allocate memory for shared mw\n");
1018 		return -ENOMEM;
1019 	}
1020 
1021 	switchtec_ntb_init_shared(sndev);
1022 
1023 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
1024 				   NTB_CTRL_PART_STATUS_LOCKED);
1025 	if (rc)
1026 		goto unalloc_and_exit;
1027 
1028 	ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
1029 	ctl_val &= 0xFF;
1030 	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
1031 	ctl_val |= ilog2(LUT_SIZE) << 8;
1032 	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
1033 	iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);
1034 
1035 	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) |
1036 		   sndev->self_shared_dma),
1037 		  &ctl->lut_entry[0]);
1038 
1039 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
1040 				   NTB_CTRL_PART_STATUS_NORMAL);
1041 	if (rc) {
1042 		u32 bar_error, lut_error;
1043 
1044 		bar_error = ioread32(&ctl->bar_error);
1045 		lut_error = ioread32(&ctl->lut_error);
1046 		dev_err(&sndev->stdev->dev,
1047 			"Error setting up shared MW: %08x / %08x\n",
1048 			bar_error, lut_error);
1049 		goto unalloc_and_exit;
1050 	}
1051 
1052 	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
1053 	if (!sndev->peer_shared) {
1054 		rc = -ENOMEM;
1055 		goto unalloc_and_exit;
1056 	}
1057 
1058 	dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
1059 	return 0;
1060 
1061 unalloc_and_exit:
1062 	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1063 			  sndev->self_shared, sndev->self_shared_dma);
1064 
1065 	return rc;
1066 }
1067 
1068 static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
1069 {
1070 	if (sndev->peer_shared)
1071 		pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
1072 
1073 	if (sndev->self_shared)
1074 		dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1075 				  sndev->self_shared,
1076 				  sndev->self_shared_dma);
1077 }
1078 
1079 static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
1080 {
1081 	struct switchtec_ntb *sndev = dev;
1082 
1083 	dev_dbg(&sndev->stdev->dev, "doorbell\n");
1084 
1085 	ntb_db_event(&sndev->ntb, 0);
1086 
1087 	return IRQ_HANDLED;
1088 }
1089 
1090 static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
1091 {
1092 	int i;
1093 	struct switchtec_ntb *sndev = dev;
1094 
1095 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1096 		u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
1097 
1098 		if (msg & NTB_DBMSG_IMSG_STATUS) {
1099 			dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
1100 				i, (u32)msg);
1101 			iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
1102 
1103 			if (i == LINK_MESSAGE)
1104 				switchtec_ntb_check_link(sndev);
1105 		}
1106 	}
1107 
1108 	return IRQ_HANDLED;
1109 }
1110 
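/*
 * Pick the two lowest MSI vector numbers that are not already used for
 * the switchtec event interrupt, route all but the last four incoming
 * doorbell vectors to the doorbell interrupt and the remaining four
 * (which are shared with the message registers) to the message
 * interrupt, then request both IRQs.
 */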
1111 static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
1112 {
1113 	int i;
1114 	int rc;
1115 	int doorbell_irq = 0;
1116 	int message_irq = 0;
1117 	int event_irq;
1118 	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);
1119 
1120 	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);
1121 
1122 	while (doorbell_irq == event_irq)
1123 		doorbell_irq++;
1124 	while (message_irq == doorbell_irq ||
1125 	       message_irq == event_irq)
1126 		message_irq++;
1127 
1128 	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
1129 		event_irq, doorbell_irq, message_irq);
1130 
1131 	for (i = 0; i < idb_vecs - 4; i++)
1132 		iowrite8(doorbell_irq,
1133 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1134 
1135 	for (; i < idb_vecs; i++)
1136 		iowrite8(message_irq,
1137 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1138 
1139 	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
1140 	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);
1141 
1142 	rc = request_irq(sndev->doorbell_irq,
1143 			 switchtec_ntb_doorbell_isr, 0,
1144 			 "switchtec_ntb_doorbell", sndev);
1145 	if (rc)
1146 		return rc;
1147 
1148 	rc = request_irq(sndev->message_irq,
1149 			 switchtec_ntb_message_isr, 0,
1150 			 "switchtec_ntb_message", sndev);
1151 	if (rc) {
1152 		free_irq(sndev->doorbell_irq, sndev);
1153 		return rc;
1154 	}
1155 
1156 	return 0;
1157 }
1158 
1159 static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
1160 {
1161 	free_irq(sndev->doorbell_irq, sndev);
1162 	free_irq(sndev->message_irq, sndev);
1163 }
1164 
1165 static int switchtec_ntb_add(struct device *dev,
1166 			     struct class_interface *class_intf)
1167 {
1168 	struct switchtec_dev *stdev = to_stdev(dev);
1169 	struct switchtec_ntb *sndev;
1170 	int rc;
1171 
1172 	stdev->sndev = NULL;
1173 
1174 	if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
1175 		return -ENODEV;
1176 
1177 	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
1178 	if (!sndev)
1179 		return -ENOMEM;
1180 
1181 	sndev->stdev = stdev;
1182 	rc = switchtec_ntb_init_sndev(sndev);
1183 	if (rc)
1184 		goto free_and_exit;
1185 
1186 	switchtec_ntb_init_mw(sndev);
1187 	switchtec_ntb_init_db(sndev);
1188 	switchtec_ntb_init_msgs(sndev);
1189 
1190 	rc = switchtec_ntb_init_req_id_table(sndev);
1191 	if (rc)
1192 		goto free_and_exit;
1193 
1194 	rc = switchtec_ntb_init_shared_mw(sndev);
1195 	if (rc)
1196 		goto free_and_exit;
1197 
1198 	rc = switchtec_ntb_init_db_msg_irq(sndev);
1199 	if (rc)
1200 		goto deinit_shared_and_exit;
1201 
1202 	rc = ntb_register_device(&sndev->ntb);
1203 	if (rc)
1204 		goto deinit_and_exit;
1205 
1206 	stdev->sndev = sndev;
1207 	stdev->link_notifier = switchtec_ntb_link_notification;
1208 	dev_info(dev, "NTB device registered\n");
1209 
1210 	return 0;
1211 
1212 deinit_and_exit:
1213 	switchtec_ntb_deinit_db_msg_irq(sndev);
1214 deinit_shared_and_exit:
1215 	switchtec_ntb_deinit_shared_mw(sndev);
1216 free_and_exit:
1217 	kfree(sndev);
1218 	dev_err(dev, "failed to register ntb device: %d\n", rc);
1219 	return rc;
1220 }
1221 
1222 static void switchtec_ntb_remove(struct device *dev,
1223 				 struct class_interface *class_intf)
1224 {
1225 	struct switchtec_dev *stdev = to_stdev(dev);
1226 	struct switchtec_ntb *sndev = stdev->sndev;
1227 
1228 	if (!sndev)
1229 		return;
1230 
1231 	stdev->link_notifier = NULL;
1232 	stdev->sndev = NULL;
1233 	ntb_unregister_device(&sndev->ntb);
1234 	switchtec_ntb_deinit_db_msg_irq(sndev);
1235 	switchtec_ntb_deinit_shared_mw(sndev);
1236 	kfree(sndev);
1237 	dev_info(dev, "ntb device unregistered\n");
1238 }
1239 
1240 static struct class_interface switchtec_interface = {
1241 	.add_dev = switchtec_ntb_add,
1242 	.remove_dev = switchtec_ntb_remove,
1243 };
1244 
1245 static int __init switchtec_ntb_init(void)
1246 {
1247 	switchtec_interface.class = switchtec_class;
1248 	return class_interface_register(&switchtec_interface);
1249 }
1250 module_init(switchtec_ntb_init);
1251 
1252 static void __exit switchtec_ntb_exit(void)
1253 {
1254 	class_interface_unregister(&switchtec_interface);
1255 }
1256 module_exit(switchtec_ntb_exit);
1257