xref: /linux/drivers/ntb/hw/mscc/ntb_hw_switchtec.c (revision 01752501820277d217a7b52548d9c948f98d2c56)
1 /*
2  * Microsemi Switchtec(tm) PCIe Management Driver
3  * Copyright (c) 2017, Microsemi Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  */
15 
16 #include <linux/switchtec.h>
17 #include <linux/module.h>
18 #include <linux/delay.h>
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21 #include <linux/ntb.h>
22 
23 MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
24 MODULE_VERSION("0.1");
25 MODULE_LICENSE("GPL");
26 MODULE_AUTHOR("Microsemi Corporation");
27 
28 static ulong max_mw_size = SZ_2M;
29 module_param(max_mw_size, ulong, 0644);
30 MODULE_PARM_DESC(max_mw_size,
31 	"Max memory window size reported to the upper layer");
32 
33 static bool use_lut_mws;
34 module_param(use_lut_mws, bool, 0644);
35 MODULE_PARM_DESC(use_lut_mws,
36 		 "Enable the use of the LUT based memory windows");
37 
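/*
 * Provide 64-bit MMIO accessors on platforms that lack native readq/writeq
 * by falling back to a pair of 32-bit accesses. Note that the split access
 * is not atomic with respect to the device.
 */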
38 #ifndef ioread64
39 #ifdef readq
40 #define ioread64 readq
41 #else
42 #define ioread64 _ioread64
43 static inline u64 _ioread64(void __iomem *mmio)
44 {
45 	u64 low, high;
46 
47 	low = ioread32(mmio);
48 	high = ioread32(mmio + sizeof(u32));
49 	return low | (high << 32);
50 }
51 #endif
52 #endif
53 
54 #ifndef iowrite64
55 #ifdef writeq
56 #define iowrite64 writeq
57 #else
58 #define iowrite64 _iowrite64
59 static inline void _iowrite64(u64 val, void __iomem *mmio)
60 {
61 	iowrite32(val, mmio);
62 	iowrite32(val >> 32, mmio + sizeof(u32));
63 }
64 #endif
65 #endif
66 
67 #define SWITCHTEC_NTB_MAGIC 0x45CC0001
68 #define MAX_MWS     128
69 
70 struct shared_mw {
71 	u32 magic;
72 	u32 link_sta;
73 	u32 partition_id;
74 	u64 mw_sizes[MAX_MWS];
75 	u32 spad[128];
76 };
77 
78 #define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
79 #define LUT_SIZE SZ_64K
80 
81 struct switchtec_ntb {
82 	struct ntb_dev ntb;
83 	struct switchtec_dev *stdev;
84 
85 	int self_partition;
86 	int peer_partition;
87 
88 	int doorbell_irq;
89 	int message_irq;
90 
91 	struct ntb_info_regs __iomem *mmio_ntb;
92 	struct ntb_ctrl_regs __iomem *mmio_ctrl;
93 	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
94 	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
95 	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
96 	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
97 
98 	void __iomem *mmio_xlink_win;
99 
100 	struct shared_mw *self_shared;
101 	struct shared_mw __iomem *peer_shared;
102 	dma_addr_t self_shared_dma;
103 
104 	u64 db_mask;
105 	u64 db_valid_mask;
106 	int db_shift;
107 	int db_peer_shift;
108 
109 	/* synchronize rmw access of db_mask and hw reg */
110 	spinlock_t db_mask_lock;
111 
112 	int nr_direct_mw;
113 	int nr_lut_mw;
114 	int nr_rsvd_luts;
115 	int direct_mw_to_bar[MAX_DIRECT_MW];
116 
117 	int peer_nr_direct_mw;
118 	int peer_nr_lut_mw;
119 	int peer_direct_mw_to_bar[MAX_DIRECT_MW];
120 
121 	bool link_is_up;
122 	enum ntb_speed link_speed;
123 	enum ntb_width link_width;
124 };
125 
126 static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
127 {
128 	return container_of(ntb, struct switchtec_ntb, ntb);
129 }
130 
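/*
 * Kick off a partition operation (lock, configure or reset) and poll the
 * partition status until it leaves the corresponding transient state.
 * Polling runs in 50ms steps for up to roughly 50 seconds; if the wait is
 * interrupted by a signal, the operation is aborted with a reset and
 * -EINTR is returned. Returns 0 once the status matches wait_status,
 * -ETIMEDOUT if the transient state never clears, and -EIO if the
 * hardware lands in some other state.
 */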
131 static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
132 				 struct ntb_ctrl_regs __iomem *ctl,
133 				 u32 op, int wait_status)
134 {
135 	static const char * const op_text[] = {
136 		[NTB_CTRL_PART_OP_LOCK] = "lock",
137 		[NTB_CTRL_PART_OP_CFG] = "configure",
138 		[NTB_CTRL_PART_OP_RESET] = "reset",
139 	};
140 
141 	int i;
142 	u32 ps;
143 	int status;
144 
145 	switch (op) {
146 	case NTB_CTRL_PART_OP_LOCK:
147 		status = NTB_CTRL_PART_STATUS_LOCKING;
148 		break;
149 	case NTB_CTRL_PART_OP_CFG:
150 		status = NTB_CTRL_PART_STATUS_CONFIGURING;
151 		break;
152 	case NTB_CTRL_PART_OP_RESET:
153 		status = NTB_CTRL_PART_STATUS_RESETTING;
154 		break;
155 	default:
156 		return -EINVAL;
157 	}
158 
159 	iowrite32(op, &ctl->partition_op);
160 
161 	for (i = 0; i < 1000; i++) {
162 		if (msleep_interruptible(50) != 0) {
163 			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
164 			return -EINTR;
165 		}
166 
167 		ps = ioread32(&ctl->partition_status) & 0xFFFF;
168 
169 		if (ps != status)
170 			break;
171 	}
172 
173 	if (ps == wait_status)
174 		return 0;
175 
176 	if (ps == status) {
177 		dev_err(&sndev->stdev->dev,
178 			"Timed out while performing %s (%d). (%08x)\n",
179 			op_text[op], op,
180 			ioread32(&ctl->partition_status));
181 
182 		return -ETIMEDOUT;
183 	}
184 
185 	return -EIO;
186 }
187 
188 static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
189 				  u32 val)
190 {
191 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg))
192 		return -EINVAL;
193 
194 	iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg);
195 
196 	return 0;
197 }
198 
199 static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
200 {
201 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
202 	int nr_direct_mw = sndev->peer_nr_direct_mw;
203 	int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;
204 
205 	if (pidx != NTB_DEF_PEER_IDX)
206 		return -EINVAL;
207 
208 	if (!use_lut_mws)
209 		nr_lut_mw = 0;
210 
211 	return nr_direct_mw + nr_lut_mw;
212 }
213 
214 static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
215 {
216 	return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
217 }
218 
219 static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
220 {
221 	return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
222 }
223 
224 static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
225 				      int widx, resource_size_t *addr_align,
226 				      resource_size_t *size_align,
227 				      resource_size_t *size_max)
228 {
229 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
230 	int lut;
231 	resource_size_t size;
232 
233 	if (pidx != NTB_DEF_PEER_IDX)
234 		return -EINVAL;
235 
236 	lut = widx >= sndev->peer_nr_direct_mw;
237 	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
238 
239 	if (size == 0)
240 		return -EINVAL;
241 
242 	if (addr_align)
243 		*addr_align = lut ? size : SZ_4K;
244 
245 	if (size_align)
246 		*size_align = lut ? size : SZ_4K;
247 
248 	if (size_max)
249 		*size_max = size;
250 
251 	return 0;
252 }
253 
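/*
 * The following helpers program (or tear down) a single memory window in
 * the peer partition's control registers: direct windows through the
 * per-BAR setup registers, LUT windows through the LUT entry table.
 * Callers are expected to bracket them with the partition LOCK/CFG
 * operations, as switchtec_ntb_mw_set_trans() does below.
 */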
254 static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
255 {
256 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
257 	int bar = sndev->peer_direct_mw_to_bar[idx];
258 	u32 ctl_val;
259 
260 	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
261 	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
262 	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
263 	iowrite32(0, &ctl->bar_entry[bar].win_size);
264 	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
265 }
266 
267 static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
268 {
269 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
270 
271 	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
272 }
273 
274 static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
275 					dma_addr_t addr, resource_size_t size)
276 {
277 	int xlate_pos = ilog2(size);
278 	int bar = sndev->peer_direct_mw_to_bar[idx];
279 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
280 	u32 ctl_val;
281 
282 	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
283 	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
284 
285 	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
286 	iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
287 	iowrite64(sndev->self_partition | addr,
288 		  &ctl->bar_entry[bar].xlate_addr);
289 }
290 
291 static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
292 				     dma_addr_t addr, resource_size_t size)
293 {
294 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
295 
296 	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
297 		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
298 }
299 
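/*
 * Set up (or clear, when addr and size are zero) the translation for
 * memory window widx: lock the peer partition's configuration, program
 * either a direct or a LUT window, then issue the CFG operation to apply
 * it. If the hardware rejects the configuration, the window is cleared
 * again and CFG is retried so the partition is left in a sane state.
 */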
300 static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
301 				      dma_addr_t addr, resource_size_t size)
302 {
303 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
304 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
305 	int xlate_pos = ilog2(size);
306 	int nr_direct_mw = sndev->peer_nr_direct_mw;
307 	int rc;
308 
309 	if (pidx != NTB_DEF_PEER_IDX)
310 		return -EINVAL;
311 
312 	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
313 		widx, pidx, &addr, &size);
314 
315 	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
316 		return -EINVAL;
317 
318 	if (xlate_pos < 12)
319 		return -EINVAL;
320 
321 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
322 				   NTB_CTRL_PART_STATUS_LOCKED);
323 	if (rc)
324 		return rc;
325 
326 	if (addr == 0 || size == 0) {
327 		if (widx < nr_direct_mw)
328 			switchtec_ntb_mw_clr_direct(sndev, widx);
329 		else
330 			switchtec_ntb_mw_clr_lut(sndev, widx);
331 	} else {
332 		if (widx < nr_direct_mw)
333 			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
334 		else
335 			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
336 	}
337 
338 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
339 				   NTB_CTRL_PART_STATUS_NORMAL);
340 
341 	if (rc == -EIO) {
342 		dev_err(&sndev->stdev->dev,
343 			"Hardware reported an error configuring mw %d: %08x\n",
344 			widx, ioread32(&ctl->bar_error));
345 
346 		if (widx < nr_direct_mw)
347 			switchtec_ntb_mw_clr_direct(sndev, widx);
348 		else
349 			switchtec_ntb_mw_clr_lut(sndev, widx);
350 
351 		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
352 				      NTB_CTRL_PART_STATUS_NORMAL);
353 	}
354 
355 	return rc;
356 }
357 
358 static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
359 {
360 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
361 	int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;
362 
363 	return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
364 }
365 
366 static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
367 					 int idx, phys_addr_t *base,
368 					 resource_size_t *size)
369 {
370 	int bar = sndev->direct_mw_to_bar[idx];
371 	size_t offset = 0;
372 
373 	if (bar < 0)
374 		return -EINVAL;
375 
376 	if (idx == 0) {
377 		/*
378 		 * This is the direct BAR shared with the LUTs
379 		 * which means the actual window will be offset
380 		 * by the size of all the LUT entries.
381 		 */
382 
383 		offset = LUT_SIZE * sndev->nr_lut_mw;
384 	}
385 
386 	if (base)
387 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
388 
389 	if (size) {
390 		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
391 		if (offset && *size > offset)
392 			*size = offset;
393 
394 		if (*size > max_mw_size)
395 			*size = max_mw_size;
396 	}
397 
398 	return 0;
399 }
400 
401 static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
402 				      int idx, phys_addr_t *base,
403 				      resource_size_t *size)
404 {
405 	int bar = sndev->direct_mw_to_bar[0];
406 	int offset;
407 
408 	offset = LUT_SIZE * lut_index(sndev, idx);
409 
410 	if (base)
411 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
412 
413 	if (size)
414 		*size = LUT_SIZE;
415 
416 	return 0;
417 }
418 
419 static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
420 					  phys_addr_t *base,
421 					  resource_size_t *size)
422 {
423 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
424 
425 	if (idx < sndev->nr_direct_mw)
426 		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
427 	else if (idx < switchtec_ntb_peer_mw_count(ntb))
428 		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
429 	else
430 		return -EINVAL;
431 }
432 
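/*
 * Read the current PCIe link speed and width for a partition from the PFF
 * CSRs referenced by its vEP instance ID. The dword read below appears to
 * hold the PCIe Link Control/Status pair, so speed and width are extracted
 * from the upper half following the Link Status register layout.
 */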
433 static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
434 					  int partition,
435 					  enum ntb_speed *speed,
436 					  enum ntb_width *width)
437 {
438 	struct switchtec_dev *stdev = sndev->stdev;
439 
440 	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
441 	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
442 
443 	if (speed)
444 		*speed = (linksta >> 16) & 0xF;
445 
446 	if (width)
447 		*width = (linksta >> 20) & 0x3F;
448 }
449 
450 static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
451 {
452 	enum ntb_speed self_speed, peer_speed;
453 	enum ntb_width self_width, peer_width;
454 
455 	if (!sndev->link_is_up) {
456 		sndev->link_speed = NTB_SPEED_NONE;
457 		sndev->link_width = NTB_WIDTH_NONE;
458 		return;
459 	}
460 
461 	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
462 				      &self_speed, &self_width);
463 	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
464 				      &peer_speed, &peer_width);
465 
466 	sndev->link_speed = min(self_speed, peer_speed);
467 	sndev->link_width = min(self_width, peer_width);
468 }
469 
470 static int crosslink_is_enabled(struct switchtec_ntb *sndev)
471 {
472 	struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;
473 
474 	return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
475 }
476 
477 enum {
478 	LINK_MESSAGE = 0,
479 	MSG_LINK_UP = 1,
480 	MSG_LINK_DOWN = 2,
481 	MSG_CHECK_LINK = 3,
482 };
483 
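/*
 * Re-evaluate the logical link state. The link is considered up only when
 * the local side has enabled it (self_shared->link_sta) and the peer's
 * shared window is reachable and carries the expected magic value; the
 * peer's own link_sta sits in the upper 32 bits of that first quadword.
 * Any change is propagated to the peer via a LINK_MESSAGE and reported to
 * the NTB core through ntb_link_event().
 */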
484 static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
485 {
486 	int link_sta;
487 	int old = sndev->link_is_up;
488 
489 	link_sta = sndev->self_shared->link_sta;
490 	if (link_sta) {
491 		u64 peer = ioread64(&sndev->peer_shared->magic);
492 
493 		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
494 			link_sta = peer >> 32;
495 		else
496 			link_sta = 0;
497 	}
498 
499 	sndev->link_is_up = link_sta;
500 	switchtec_ntb_set_link_speed(sndev);
501 
502 	if (link_sta != old) {
503 		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
504 		ntb_link_event(&sndev->ntb);
505 		dev_info(&sndev->stdev->dev, "ntb link %s\n",
506 			 link_sta ? "up" : "down");
507 	}
508 }
509 
510 static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
511 {
512 	struct switchtec_ntb *sndev = stdev->sndev;
513 
514 	switchtec_ntb_check_link(sndev);
515 }
516 
517 static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
518 				    enum ntb_speed *speed,
519 				    enum ntb_width *width)
520 {
521 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
522 
523 	if (speed)
524 		*speed = sndev->link_speed;
525 	if (width)
526 		*width = sndev->link_width;
527 
528 	return sndev->link_is_up;
529 }
530 
531 static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
532 				     enum ntb_speed max_speed,
533 				     enum ntb_width max_width)
534 {
535 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
536 
537 	dev_dbg(&sndev->stdev->dev, "enabling link\n");
538 
539 	sndev->self_shared->link_sta = 1;
540 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
541 
542 	switchtec_ntb_check_link(sndev);
543 
544 	return 0;
545 }
546 
547 static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
548 {
549 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
550 
551 	dev_dbg(&sndev->stdev->dev, "disabling link\n");
552 
553 	sndev->self_shared->link_sta = 0;
554 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);
555 
556 	switchtec_ntb_check_link(sndev);
557 
558 	return 0;
559 }
560 
561 static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
562 {
563 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
564 
565 	return sndev->db_valid_mask;
566 }
567 
568 static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
569 {
570 	return 1;
571 }
572 
573 static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
574 {
575 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
576 
577 	if (db_vector < 0 || db_vector > 1)
578 		return 0;
579 
580 	return sndev->db_valid_mask;
581 }
582 
583 static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
584 {
585 	u64 ret;
586 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
587 
588 	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
589 
590 	return ret & sndev->db_valid_mask;
591 }
592 
593 static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
594 {
595 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
596 
597 	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
598 
599 	return 0;
600 }
601 
602 static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
603 {
604 	unsigned long irqflags;
605 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
606 
607 	if (db_bits & ~sndev->db_valid_mask)
608 		return -EINVAL;
609 
610 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
611 
612 	sndev->db_mask |= db_bits << sndev->db_shift;
613 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
614 
615 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
616 
617 	return 0;
618 }
619 
620 static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
621 {
622 	unsigned long irqflags;
623 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
624 
625 	if (db_bits & ~sndev->db_valid_mask)
626 		return -EINVAL;
627 
628 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
629 
630 	sndev->db_mask &= ~(db_bits << sndev->db_shift);
631 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
632 
633 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
634 
635 	return 0;
636 }
637 
638 static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
639 {
640 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
641 
642 	return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
643 }
644 
645 static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
646 				      phys_addr_t *db_addr,
647 				      resource_size_t *db_size)
648 {
649 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
650 	unsigned long offset;
651 
652 	offset = (unsigned long)sndev->mmio_self_dbmsg->odb -
653 		(unsigned long)sndev->stdev->mmio;
654 
655 	offset += sndev->db_shift / 8;
656 
657 	if (db_addr)
658 		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
659 	if (db_size)
660 		*db_size = sizeof(u32);
661 
662 	return 0;
663 }
664 
665 static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
666 {
667 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
668 
669 	iowrite64(db_bits << sndev->db_peer_shift,
670 		  &sndev->mmio_self_dbmsg->odb);
671 
672 	return 0;
673 }
674 
675 static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
676 {
677 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
678 
679 	return ARRAY_SIZE(sndev->self_shared->spad);
680 }
681 
682 static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
683 {
684 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
685 
686 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
687 		return 0;
688 
689 	if (!sndev->self_shared)
690 		return 0;
691 
692 	return sndev->self_shared->spad[idx];
693 }
694 
695 static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
696 {
697 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
698 
699 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
700 		return -EINVAL;
701 
702 	if (!sndev->self_shared)
703 		return -EIO;
704 
705 	sndev->self_shared->spad[idx] = val;
706 
707 	return 0;
708 }
709 
710 static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
711 					int sidx)
712 {
713 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
714 
715 	if (pidx != NTB_DEF_PEER_IDX)
716 		return -EINVAL;
717 
718 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
719 		return 0;
720 
721 	if (!sndev->peer_shared)
722 		return 0;
723 
724 	return ioread32(&sndev->peer_shared->spad[sidx]);
725 }
726 
727 static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
728 					 int sidx, u32 val)
729 {
730 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
731 
732 	if (pidx != NTB_DEF_PEER_IDX)
733 		return -EINVAL;
734 
735 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
736 		return -EINVAL;
737 
738 	if (!sndev->peer_shared)
739 		return -EIO;
740 
741 	iowrite32(val, &sndev->peer_shared->spad[sidx]);
742 
743 	return 0;
744 }
745 
746 static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
747 					int sidx, phys_addr_t *spad_addr)
748 {
749 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
750 	unsigned long offset;
751 
752 	if (pidx != NTB_DEF_PEER_IDX)
753 		return -EINVAL;
754 
755 	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
756 		(unsigned long)sndev->stdev->mmio;
757 
758 	if (spad_addr)
759 		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
760 
761 	return 0;
762 }
763 
764 static const struct ntb_dev_ops switchtec_ntb_ops = {
765 	.mw_count		= switchtec_ntb_mw_count,
766 	.mw_get_align		= switchtec_ntb_mw_get_align,
767 	.mw_set_trans		= switchtec_ntb_mw_set_trans,
768 	.peer_mw_count		= switchtec_ntb_peer_mw_count,
769 	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
770 	.link_is_up		= switchtec_ntb_link_is_up,
771 	.link_enable		= switchtec_ntb_link_enable,
772 	.link_disable		= switchtec_ntb_link_disable,
773 	.db_valid_mask		= switchtec_ntb_db_valid_mask,
774 	.db_vector_count	= switchtec_ntb_db_vector_count,
775 	.db_vector_mask		= switchtec_ntb_db_vector_mask,
776 	.db_read		= switchtec_ntb_db_read,
777 	.db_clear		= switchtec_ntb_db_clear,
778 	.db_set_mask		= switchtec_ntb_db_set_mask,
779 	.db_clear_mask		= switchtec_ntb_db_clear_mask,
780 	.db_read_mask		= switchtec_ntb_db_read_mask,
781 	.peer_db_addr		= switchtec_ntb_peer_db_addr,
782 	.peer_db_set		= switchtec_ntb_peer_db_set,
783 	.spad_count		= switchtec_ntb_spad_count,
784 	.spad_read		= switchtec_ntb_spad_read,
785 	.spad_write		= switchtec_ntb_spad_write,
786 	.peer_spad_read		= switchtec_ntb_peer_spad_read,
787 	.peer_spad_write	= switchtec_ntb_peer_spad_write,
788 	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
789 };
790 
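/*
 * Fill in the ntb_dev fields and work out which partition we are and which
 * partition is our NT peer. The peer is taken from the target partition
 * vector when one is defined; otherwise, with exactly two partitions, the
 * remaining bit in the endpoint map is used. Only a single 1:1 mapping is
 * supported. Pointers into the per-partition control and doorbell/message
 * register blocks are derived at the end.
 */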
791 static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
792 {
793 	u64 tpart_vec;
794 	int self;
795 	u64 part_map;
796 	int bit;
797 
798 	sndev->ntb.pdev = sndev->stdev->pdev;
799 	sndev->ntb.topo = NTB_TOPO_SWITCH;
800 	sndev->ntb.ops = &switchtec_ntb_ops;
801 
802 	sndev->self_partition = sndev->stdev->partition;
803 
804 	sndev->mmio_ntb = sndev->stdev->mmio_ntb;
805 
806 	self = sndev->self_partition;
807 	tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
808 	tpart_vec <<= 32;
809 	tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
810 
811 	part_map = ioread64(&sndev->mmio_ntb->ep_map);
812 	part_map &= ~(1ULL << sndev->self_partition);
813 
814 	if (!tpart_vec) {
815 		if (sndev->stdev->partition_count != 2) {
816 			dev_err(&sndev->stdev->dev,
817 				"ntb target partition not defined\n");
818 			return -ENODEV;
819 		}
820 
821 		bit = ffs(part_map);
822 		if (!bit) {
823 			dev_err(&sndev->stdev->dev,
824 				"peer partition is not NT partition\n");
825 			return -ENODEV;
826 		}
827 
828 		sndev->peer_partition = bit - 1;
829 	} else {
830 		if (__ffs64(tpart_vec) != (fls64(tpart_vec) - 1)) {
831 			dev_err(&sndev->stdev->dev,
832 				"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
833 			return -ENODEV;
834 		}
835 
836 		sndev->peer_partition = __ffs64(tpart_vec);
837 		if (!(part_map & (1ULL << sndev->peer_partition))) {
838 			dev_err(&sndev->stdev->dev,
839 				"ntb target partition is not NT partition\n");
840 			return -ENODEV;
841 		}
842 	}
843 
844 	dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
845 		sndev->self_partition, sndev->stdev->partition_count);
846 
847 	sndev->mmio_ctrl = (void __iomem *)sndev->mmio_ntb +
848 		SWITCHTEC_NTB_REG_CTRL_OFFSET;
849 	sndev->mmio_dbmsg = (void __iomem *)sndev->mmio_ntb +
850 		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
851 
852 	sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
853 	sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
854 	sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
855 
856 	return 0;
857 }
858 
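/*
 * Program one of the reserved LUT entries in the given partition's control
 * registers so that address 'addr' in partition 'partition' becomes
 * reachable through the LUT region of the shared BAR. This is used for the
 * peer's view of the shared memory window and for the crosslink window.
 */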
859 static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
860 			       struct ntb_ctrl_regs __iomem *ctl,
861 			       int lut_idx, int partition, u64 addr)
862 {
863 	int peer_bar = sndev->peer_direct_mw_to_bar[0];
864 	u32 ctl_val;
865 	int rc;
866 
867 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
868 				   NTB_CTRL_PART_STATUS_LOCKED);
869 	if (rc)
870 		return rc;
871 
872 	ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
873 	ctl_val &= 0xFF;
874 	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
875 	ctl_val |= ilog2(LUT_SIZE) << 8;
876 	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
877 	iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);
878 
879 	iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
880 		  &ctl->lut_entry[lut_idx]);
881 
882 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
883 				   NTB_CTRL_PART_STATUS_NORMAL);
884 	if (rc) {
885 		u32 bar_error, lut_error;
886 
887 		bar_error = ioread32(&ctl->bar_error);
888 		lut_error = ioread32(&ctl->lut_error);
889 		dev_err(&sndev->stdev->dev,
890 			"Error setting up reserved lut window: %08x / %08x\n",
891 			bar_error, lut_error);
892 		return rc;
893 	}
894 
895 	return 0;
896 }
897 
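/*
 * Populate the requester ID translation table so that TLPs from the given
 * requester IDs are allowed through the NTB. The hardware reports back the
 * proxy ID it assigned to each entry, which is logged for debugging.
 */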
898 static int config_req_id_table(struct switchtec_ntb *sndev,
899 			       struct ntb_ctrl_regs __iomem *mmio_ctrl,
900 			       int *req_ids, int count)
901 {
902 	int i, rc = 0;
903 	u32 error;
904 	u32 proxy_id;
905 
906 	if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
907 		dev_err(&sndev->stdev->dev,
908 			"Not enough requester IDs available.\n");
909 		return -EFAULT;
910 	}
911 
912 	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
913 				   NTB_CTRL_PART_OP_LOCK,
914 				   NTB_CTRL_PART_STATUS_LOCKED);
915 	if (rc)
916 		return rc;
917 
918 	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
919 		  &mmio_ctrl->partition_ctrl);
920 
921 	for (i = 0; i < count; i++) {
922 		iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
923 			  &mmio_ctrl->req_id_table[i]);
924 
925 		proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
926 		dev_dbg(&sndev->stdev->dev,
927 			"Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
928 			req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
929 			req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
930 			(proxy_id >> 1) & 0x7);
931 	}
932 
933 	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
934 				   NTB_CTRL_PART_OP_CFG,
935 				   NTB_CTRL_PART_STATUS_NORMAL);
936 
937 	if (rc == -EIO) {
938 		error = ioread32(&mmio_ctrl->req_id_error);
939 		dev_err(&sndev->stdev->dev,
940 			"Error setting up the requester ID table: %08x\n",
941 			error);
942 	}
943 
944 	return 0;
945 }
946 
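/*
 * In the crosslink case, map this partition's own LUT and direct windows
 * onto the BAR addresses assigned in the virtual crosslink partition,
 * skipping the LUT entry that switchtec_ntb_init_crosslink() reserves
 * separately.
 */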
947 static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
948 			       u64 *mw_addrs, int mw_count)
949 {
950 	int rc, i;
951 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl;
952 	u64 addr;
953 	size_t size, offset;
954 	int bar;
955 	int xlate_pos;
956 	u32 ctl_val;
957 
958 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
959 				   NTB_CTRL_PART_STATUS_LOCKED);
960 	if (rc)
961 		return rc;
962 
963 	for (i = 0; i < sndev->nr_lut_mw; i++) {
964 		if (i == ntb_lut_idx)
965 			continue;
966 
967 		addr = mw_addrs[0] + LUT_SIZE * i;
968 
969 		iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) |
970 			   addr),
971 			  &ctl->lut_entry[i]);
972 	}
973 
974 	sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count);
975 
976 	for (i = 0; i < sndev->nr_direct_mw; i++) {
977 		bar = sndev->direct_mw_to_bar[i];
978 		offset = (i == 0) ? LUT_SIZE * sndev->nr_lut_mw : 0;
979 		addr = mw_addrs[i] + offset;
980 		size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
981 		xlate_pos = ilog2(size);
982 
983 		if (offset && size > offset)
984 			size = offset;
985 
986 		ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
987 		ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
988 
989 		iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
990 		iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
991 		iowrite64(sndev->peer_partition | addr,
992 			  &ctl->bar_entry[bar].xlate_addr);
993 	}
994 
995 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
996 				   NTB_CTRL_PART_STATUS_NORMAL);
997 	if (rc) {
998 		u32 bar_error, lut_error;
999 
1000 		bar_error = ioread32(&ctl->bar_error);
1001 		lut_error = ioread32(&ctl->lut_error);
1002 		dev_err(&sndev->stdev->dev,
1003 			"Error setting up cross link windows: %08x / %08x\n",
1004 			bar_error, lut_error);
1005 		return rc;
1006 	}
1007 
1008 	return 0;
1009 }
1010 
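/*
 * Copy the proxy IDs that the hardware assigned in our own requester ID
 * table into the table passed in (the peer's, in the crosslink path).
 * The intent, as far as can be inferred from the code, is that requests
 * already translated once by this side are then accepted on the far side
 * of the crosslink.
 */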
1011 static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
1012 	struct ntb_ctrl_regs __iomem *mmio_ctrl)
1013 {
1014 	int req_ids[16];
1015 	int i;
1016 	u32 proxy_id;
1017 
1018 	for (i = 0; i < ARRAY_SIZE(req_ids); i++) {
1019 		proxy_id = ioread32(&sndev->mmio_self_ctrl->req_id_table[i]);
1020 
1021 		if (!(proxy_id & NTB_CTRL_REQ_ID_EN))
1022 			break;
1023 
1024 		req_ids[i] = ((proxy_id >> 1) & 0xFF);
1025 	}
1026 
1027 	return config_req_id_table(sndev, mmio_ctrl, req_ids, i);
1028 }
1029 
1030 /*
1031  * In crosslink configuration there is a virtual partition in the
1032  * middle of the two switches. The BARs in this partition have to be
1033  * enumerated and assigned addresses.
1034  */
1035 static int crosslink_enum_partition(struct switchtec_ntb *sndev,
1036 				    u64 *bar_addrs)
1037 {
1038 	struct part_cfg_regs __iomem *part_cfg =
1039 		&sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
1040 	u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
1041 	struct pff_csr_regs __iomem *mmio_pff =
1042 		&sndev->stdev->mmio_pff_csr[pff];
1043 	const u64 bar_space = 0x1000000000LL;
1044 	u64 bar_addr;
1045 	int bar_cnt = 0;
1046 	int i;
1047 
1048 	iowrite16(0x6, &mmio_pff->pcicmd);
1049 
1050 	for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
1051 		iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
1052 		bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
1053 		bar_addr &= ~0xf;
1054 
1055 		dev_dbg(&sndev->stdev->dev,
1056 			"Crosslink BAR%d addr: %llx\n",
1057 			i, bar_addr);
1058 
1059 		if (bar_addr != bar_space * i)
1060 			continue;
1061 
1062 		bar_addrs[bar_cnt++] = bar_addr;
1063 	}
1064 
1065 	return bar_cnt;
1066 }
1067 
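/*
 * If crosslink is enabled for the peer partition, configure this side of
 * it: enumerate and assign the BARs of the virtual crosslink partition,
 * point a reserved LUT entry at the first of those addresses in the peer
 * partition, set up our LUT and direct windows against the remaining
 * addresses, mirror our requester IDs into the peer's table, and finally
 * iomap the crosslink window for local use.
 */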
1068 static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
1069 {
1070 	int rc;
1071 	int bar = sndev->direct_mw_to_bar[0];
1072 	const int ntb_lut_idx = 1;
1073 	u64 bar_addrs[6];
1074 	u64 addr;
1075 	int bar_cnt;
1076 
1077 	if (!crosslink_is_enabled(sndev))
1078 		return 0;
1079 
1080 	dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
1081 	sndev->ntb.topo = NTB_TOPO_CROSSLINK;
1082 
1083 	bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
1084 	if (bar_cnt < sndev->nr_direct_mw + 1) {
1085 		dev_err(&sndev->stdev->dev,
1086 			"Error enumerating crosslink partition\n");
1087 		return -EINVAL;
1088 	}
1089 
1090 	addr = bar_addrs[0];
1091 	rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
1092 				 sndev->peer_partition, addr);
1093 	if (rc)
1094 		return rc;
1095 
1096 	rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
1097 				 bar_cnt - 1);
1098 	if (rc)
1099 		return rc;
1100 
1101 	rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
1102 	if (rc)
1103 		return rc;
1104 
1105 	sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
1106 						LUT_SIZE, LUT_SIZE);
1107 	if (!sndev->mmio_xlink_win) {
1108 		rc = -ENOMEM;
1109 		return rc;
1110 	}
1111 
1112 	sndev->nr_rsvd_luts++;
1113 
1114 	return 0;
1115 }
1116 
1117 static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
1118 {
1119 	if (sndev->mmio_xlink_win)
1120 		pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
1121 }
1122 
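/*
 * Build the memory-window-index to BAR-number mapping by scanning the BAR
 * setup registers and recording which entries the hardware marks valid.
 * Returns the number of usable direct windows.
 */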
1123 static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
1124 {
1125 	int i;
1126 	int cnt = 0;
1127 
1128 	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
1129 		u32 r = ioread32(&ctrl->bar_entry[i].ctl);
1130 
1131 		if (r & NTB_CTRL_BAR_VALID)
1132 			map[cnt++] = i;
1133 	}
1134 
1135 	return cnt;
1136 }
1137 
1138 static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
1139 {
1140 	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
1141 				       sndev->mmio_self_ctrl);
1142 
1143 	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
1144 	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
1145 
1146 	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
1147 		sndev->nr_direct_mw, sndev->nr_lut_mw);
1148 
1149 	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
1150 					    sndev->mmio_peer_ctrl);
1151 
1152 	sndev->peer_nr_lut_mw =
1153 		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
1154 	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
1155 
1156 	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
1157 		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
1159 }
1160 
1161 /*
1162  * There are 64 doorbells in the switch hardware, but they are
1163  * shared among all partitions, so we must split them in half
1164  * (32 for each partition). However, the top 4 doorbells in each
1165  * half are also used for the message interrupts, so we limit
1166  * this to 28 doorbells per partition.
1167  */
1168 static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
1169 {
1170 	sndev->db_valid_mask = 0x0FFFFFFF;
1171 
1172 	if (sndev->self_partition < sndev->peer_partition) {
1173 		sndev->db_shift = 0;
1174 		sndev->db_peer_shift = 32;
1175 	} else {
1176 		sndev->db_shift = 32;
1177 		sndev->db_peer_shift = 0;
1178 	}
1179 
1180 	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
1181 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
1182 	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
1183 		  &sndev->mmio_self_dbmsg->odb_mask);
1184 }
1185 
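/*
 * Program the message map so that each of our incoming message registers
 * is sourced from the peer partition, then leave every incoming message
 * masked with any stale status cleared.
 */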
1186 static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
1187 {
1188 	int i;
1189 	u32 msg_map = 0;
1190 
1191 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1192 		int m = i | sndev->peer_partition << 2;
1193 
1194 		msg_map |= m << i * 8;
1195 	}
1196 
1197 	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
1198 
1199 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
1200 		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
1201 			  &sndev->mmio_self_dbmsg->imsg[i]);
1202 }
1203 
1204 static int
1205 switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
1206 {
1207 	int req_ids[2];
1208 
1209 	/*
1210 	 * Root Complex Requester ID (which is 0:00.0)
1211 	 */
1212 	req_ids[0] = 0;
1213 
1214 	/*
1215 	 * Host Bridge Requester ID (as read from the mmap address)
1216 	 */
1217 	req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);
1218 
1219 	return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
1220 				   ARRAY_SIZE(req_ids));
1221 }
1222 
1223 static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
1224 {
1225 	int i;
1226 
1227 	memset(sndev->self_shared, 0, LUT_SIZE);
1228 	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
1229 	sndev->self_shared->partition_id = sndev->stdev->partition;
1230 
1231 	for (i = 0; i < sndev->nr_direct_mw; i++) {
1232 		int bar = sndev->direct_mw_to_bar[i];
1233 		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
1234 
1235 		if (i == 0)
1236 			sz = min_t(resource_size_t, sz,
1237 				   LUT_SIZE * sndev->nr_lut_mw);
1238 
1239 		sndev->self_shared->mw_sizes[i] = sz;
1240 	}
1241 
1242 	for (i = 0; i < sndev->nr_lut_mw; i++) {
1243 		int idx = sndev->nr_direct_mw + i;
1244 
1245 		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
1246 	}
1247 }
1248 
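/*
 * Allocate the local shared-memory page (magic, link state, scratchpads
 * and window sizes), expose it to the peer through reserved LUT entry 0 in
 * the peer's control registers, and map the peer's copy of the same page
 * through the first LUT window of our own shared BAR.
 */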
1249 static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
1250 {
1251 	int self_bar = sndev->direct_mw_to_bar[0];
1252 	int rc;
1253 
1254 	sndev->nr_rsvd_luts++;
1255 	sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
1256 						 LUT_SIZE,
1257 						 &sndev->self_shared_dma,
1258 						 GFP_KERNEL);
1259 	if (!sndev->self_shared) {
1260 		dev_err(&sndev->stdev->dev,
1261 			"unable to allocate memory for shared mw\n");
1262 		return -ENOMEM;
1263 	}
1264 
1265 	switchtec_ntb_init_shared(sndev);
1266 
1267 	rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
1268 				 sndev->self_partition,
1269 				 sndev->self_shared_dma);
1270 	if (rc)
1271 		goto unalloc_and_exit;
1272 
1273 	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
1274 	if (!sndev->peer_shared) {
1275 		rc = -ENOMEM;
1276 		goto unalloc_and_exit;
1277 	}
1278 
1279 	dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
1280 	return 0;
1281 
1282 unalloc_and_exit:
1283 	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1284 			  sndev->self_shared, sndev->self_shared_dma);
1285 
1286 	return rc;
1287 }
1288 
1289 static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
1290 {
1291 	if (sndev->peer_shared)
1292 		pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
1293 
1294 	if (sndev->self_shared)
1295 		dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1296 				  sndev->self_shared,
1297 				  sndev->self_shared_dma);
1298 	sndev->nr_rsvd_luts--;
1299 }
1300 
1301 static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
1302 {
1303 	struct switchtec_ntb *sndev = dev;
1304 
1305 	dev_dbg(&sndev->stdev->dev, "doorbell\n");
1306 
1307 	ntb_db_event(&sndev->ntb, 0);
1308 
1309 	return IRQ_HANDLED;
1310 }
1311 
1312 static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
1313 {
1314 	int i;
1315 	struct switchtec_ntb *sndev = dev;
1316 
1317 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1318 		u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
1319 
1320 		if (msg & NTB_DBMSG_IMSG_STATUS) {
1321 			dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
1322 				i, (u32)msg);
1323 			iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
1324 
1325 			if (i == LINK_MESSAGE)
1326 				switchtec_ntb_check_link(sndev);
1327 		}
1328 	}
1329 
1330 	return IRQ_HANDLED;
1331 }
1332 
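/*
 * Pick MSI vector numbers for doorbells and messages that do not collide
 * with the event vector already used by the switchtec management driver,
 * route all but the last four incoming doorbell vectors to the doorbell
 * interrupt and the remaining four (used for messages) to the message
 * interrupt, then request both IRQs.
 */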
1333 static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
1334 {
1335 	int i;
1336 	int rc;
1337 	int doorbell_irq = 0;
1338 	int message_irq = 0;
1339 	int event_irq;
1340 	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);
1341 
1342 	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);
1343 
1344 	while (doorbell_irq == event_irq)
1345 		doorbell_irq++;
1346 	while (message_irq == doorbell_irq ||
1347 	       message_irq == event_irq)
1348 		message_irq++;
1349 
1350 	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
1351 		event_irq, doorbell_irq, message_irq);
1352 
1353 	for (i = 0; i < idb_vecs - 4; i++)
1354 		iowrite8(doorbell_irq,
1355 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1356 
1357 	for (; i < idb_vecs; i++)
1358 		iowrite8(message_irq,
1359 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1360 
1361 	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
1362 	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);
1363 
1364 	rc = request_irq(sndev->doorbell_irq,
1365 			 switchtec_ntb_doorbell_isr, 0,
1366 			 "switchtec_ntb_doorbell", sndev);
1367 	if (rc)
1368 		return rc;
1369 
1370 	rc = request_irq(sndev->message_irq,
1371 			 switchtec_ntb_message_isr, 0,
1372 			 "switchtec_ntb_message", sndev);
1373 	if (rc) {
1374 		free_irq(sndev->doorbell_irq, sndev);
1375 		return rc;
1376 	}
1377 
1378 	return 0;
1379 }
1380 
1381 static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
1382 {
1383 	free_irq(sndev->doorbell_irq, sndev);
1384 	free_irq(sndev->message_irq, sndev);
1385 }
1386 
1387 static int switchtec_ntb_add(struct device *dev,
1388 			     struct class_interface *class_intf)
1389 {
1390 	struct switchtec_dev *stdev = to_stdev(dev);
1391 	struct switchtec_ntb *sndev;
1392 	int rc;
1393 
1394 	stdev->sndev = NULL;
1395 
1396 	if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
1397 		return -ENODEV;
1398 
1399 	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
1400 	if (!sndev)
1401 		return -ENOMEM;
1402 
1403 	sndev->stdev = stdev;
1404 	rc = switchtec_ntb_init_sndev(sndev);
1405 	if (rc)
1406 		goto free_and_exit;
1407 
1408 	switchtec_ntb_init_mw(sndev);
1409 
1410 	rc = switchtec_ntb_init_req_id_table(sndev);
1411 	if (rc)
1412 		goto free_and_exit;
1413 
1414 	rc = switchtec_ntb_init_crosslink(sndev);
1415 	if (rc)
1416 		goto free_and_exit;
1417 
1418 	switchtec_ntb_init_db(sndev);
1419 	switchtec_ntb_init_msgs(sndev);
1420 
1421 	rc = switchtec_ntb_init_shared_mw(sndev);
1422 	if (rc)
1423 		goto deinit_crosslink;
1424 
1425 	rc = switchtec_ntb_init_db_msg_irq(sndev);
1426 	if (rc)
1427 		goto deinit_shared_and_exit;
1428 
1429 	rc = ntb_register_device(&sndev->ntb);
1430 	if (rc)
1431 		goto deinit_and_exit;
1432 
1433 	stdev->sndev = sndev;
1434 	stdev->link_notifier = switchtec_ntb_link_notification;
1435 	dev_info(dev, "NTB device registered\n");
1436 
1437 	return 0;
1438 
1439 deinit_and_exit:
1440 	switchtec_ntb_deinit_db_msg_irq(sndev);
1441 deinit_shared_and_exit:
1442 	switchtec_ntb_deinit_shared_mw(sndev);
1443 deinit_crosslink:
1444 	switchtec_ntb_deinit_crosslink(sndev);
1445 free_and_exit:
1446 	kfree(sndev);
1447 	dev_err(dev, "failed to register ntb device: %d\n", rc);
1448 	return rc;
1449 }
1450 
1451 static void switchtec_ntb_remove(struct device *dev,
1452 			  struct class_interface *class_intf)
1453 {
1454 	struct switchtec_dev *stdev = to_stdev(dev);
1455 	struct switchtec_ntb *sndev = stdev->sndev;
1456 
1457 	if (!sndev)
1458 		return;
1459 
1460 	stdev->link_notifier = NULL;
1461 	stdev->sndev = NULL;
1462 	ntb_unregister_device(&sndev->ntb);
1463 	switchtec_ntb_deinit_db_msg_irq(sndev);
1464 	switchtec_ntb_deinit_shared_mw(sndev);
1465 	switchtec_ntb_deinit_crosslink(sndev);
1466 	kfree(sndev);
1467 	dev_info(dev, "ntb device unregistered\n");
1468 }
1469 
1470 static struct class_interface switchtec_interface = {
1471 	.add_dev = switchtec_ntb_add,
1472 	.remove_dev = switchtec_ntb_remove,
1473 };
1474 
1475 static int __init switchtec_ntb_init(void)
1476 {
1477 	switchtec_interface.class = switchtec_class;
1478 	return class_interface_register(&switchtec_interface);
1479 }
1480 module_init(switchtec_ntb_init);
1481 
1482 static void __exit switchtec_ntb_exit(void)
1483 {
1484 	class_interface_unregister(&switchtec_interface);
1485 }
1486 module_exit(switchtec_ntb_exit);
1487