xref: /linux/drivers/ntb/hw/mscc/ntb_hw_switchtec.c (revision 270d32e63c70c808a91449da24324e0009827c5f)
1 /*
2  * Microsemi Switchtec(tm) PCIe Management Driver
3  * Copyright (c) 2017, Microsemi Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  */
15 
16 #include <linux/switchtec.h>
17 #include <linux/module.h>
18 #include <linux/delay.h>
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21 #include <linux/ntb.h>
22 
23 MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
24 MODULE_VERSION("0.1");
25 MODULE_LICENSE("GPL");
26 MODULE_AUTHOR("Microsemi Corporation");
27 
28 static ulong max_mw_size = SZ_2M;
29 module_param(max_mw_size, ulong, 0644);
30 MODULE_PARM_DESC(max_mw_size,
31 	"Max memory window size reported to the upper layer");
32 
33 static bool use_lut_mws;
34 module_param(use_lut_mws, bool, 0644);
35 MODULE_PARM_DESC(use_lut_mws,
36 		 "Enable the use of the LUT based memory windows");
37 
38 #ifndef ioread64
39 #ifdef readq
40 #define ioread64 readq
41 #else
42 #define ioread64 _ioread64
43 static inline u64 _ioread64(void __iomem *mmio)
44 {
45 	u64 low, high;
46 
47 	low = ioread32(mmio);
48 	high = ioread32(mmio + sizeof(u32));
49 	return low | (high << 32);
50 }
51 #endif
52 #endif
53 
54 #ifndef iowrite64
55 #ifdef writeq
56 #define iowrite64 writeq
57 #else
58 #define iowrite64 _iowrite64
59 static inline void _iowrite64(u64 val, void __iomem *mmio)
60 {
61 	iowrite32(val, mmio);
62 	iowrite32(val >> 32, mmio + sizeof(u32));
63 }
64 #endif
65 #endif
66 
67 #define SWITCHTEC_NTB_MAGIC 0x45CC0001
68 #define MAX_MWS     128
69 
70 struct shared_mw {
71 	u32 magic;
72 	u32 link_sta;
73 	u32 partition_id;
74 	u64 mw_sizes[MAX_MWS];
75 	u32 spad[128];
76 };
77 
78 #define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
79 #define LUT_SIZE SZ_64K
80 
81 struct switchtec_ntb {
82 	struct ntb_dev ntb;
83 	struct switchtec_dev *stdev;
84 
85 	int self_partition;
86 	int peer_partition;
87 
88 	int doorbell_irq;
89 	int message_irq;
90 
91 	struct ntb_info_regs __iomem *mmio_ntb;
92 	struct ntb_ctrl_regs __iomem *mmio_ctrl;
93 	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
94 	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
95 	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
96 	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
97 	struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg;
98 
99 	void __iomem *mmio_xlink_win;
100 
101 	struct shared_mw *self_shared;
102 	struct shared_mw __iomem *peer_shared;
103 	dma_addr_t self_shared_dma;
104 
105 	u64 db_mask;
106 	u64 db_valid_mask;
107 	int db_shift;
108 	int db_peer_shift;
109 
110 	/* synchronize rmw access of db_mask and hw reg */
111 	spinlock_t db_mask_lock;
112 
113 	int nr_direct_mw;
114 	int nr_lut_mw;
115 	int nr_rsvd_luts;
116 	int direct_mw_to_bar[MAX_DIRECT_MW];
117 
118 	int peer_nr_direct_mw;
119 	int peer_nr_lut_mw;
120 	int peer_direct_mw_to_bar[MAX_DIRECT_MW];
121 
122 	bool link_is_up;
123 	enum ntb_speed link_speed;
124 	enum ntb_width link_width;
125 };
126 
127 static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
128 {
129 	return container_of(ntb, struct switchtec_ntb, ntb);
130 }
131 
132 static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
133 				 struct ntb_ctrl_regs __iomem *ctl,
134 				 u32 op, int wait_status)
135 {
136 	static const char * const op_text[] = {
137 		[NTB_CTRL_PART_OP_LOCK] = "lock",
138 		[NTB_CTRL_PART_OP_CFG] = "configure",
139 		[NTB_CTRL_PART_OP_RESET] = "reset",
140 	};
141 
142 	int i;
143 	u32 ps;
144 	int status;
145 
146 	switch (op) {
147 	case NTB_CTRL_PART_OP_LOCK:
148 		status = NTB_CTRL_PART_STATUS_LOCKING;
149 		break;
150 	case NTB_CTRL_PART_OP_CFG:
151 		status = NTB_CTRL_PART_STATUS_CONFIGURING;
152 		break;
153 	case NTB_CTRL_PART_OP_RESET:
154 		status = NTB_CTRL_PART_STATUS_RESETTING;
155 		break;
156 	default:
157 		return -EINVAL;
158 	}
159 
160 	iowrite32(op, &ctl->partition_op);
161 
162 	for (i = 0; i < 1000; i++) {
163 		if (msleep_interruptible(50) != 0) {
164 			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
165 			return -EINTR;
166 		}
167 
168 		ps = ioread32(&ctl->partition_status) & 0xFFFF;
169 
170 		if (ps != status)
171 			break;
172 	}
173 
174 	if (ps == wait_status)
175 		return 0;
176 
177 	if (ps == status) {
178 		dev_err(&sndev->stdev->dev,
179 			"Timed out while performing %s (%d). (%08x)\n",
180 			op_text[op], op,
181 			ioread32(&ctl->partition_status));
182 
183 		return -ETIMEDOUT;
184 	}
185 
186 	return -EIO;
187 }
188 
189 static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
190 				  u32 val)
191 {
192 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg))
193 		return -EINVAL;
194 
195 	iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg);
196 
197 	return 0;
198 }
199 
200 static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
201 {
202 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
203 	int nr_direct_mw = sndev->peer_nr_direct_mw;
204 	int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;
205 
206 	if (pidx != NTB_DEF_PEER_IDX)
207 		return -EINVAL;
208 
209 	if (!use_lut_mws)
210 		nr_lut_mw = 0;
211 
212 	return nr_direct_mw + nr_lut_mw;
213 }
214 
215 static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
216 {
217 	return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
218 }
219 
220 static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
221 {
222 	return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
223 }
224 
225 static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
226 				      int widx, resource_size_t *addr_align,
227 				      resource_size_t *size_align,
228 				      resource_size_t *size_max)
229 {
230 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
231 	int lut;
232 	resource_size_t size;
233 
234 	if (pidx != NTB_DEF_PEER_IDX)
235 		return -EINVAL;
236 
237 	lut = widx >= sndev->peer_nr_direct_mw;
238 	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
239 
240 	if (size == 0)
241 		return -EINVAL;
242 
243 	if (addr_align)
244 		*addr_align = lut ? size : SZ_4K;
245 
246 	if (size_align)
247 		*size_align = lut ? size : SZ_4K;
248 
249 	if (size_max)
250 		*size_max = size;
251 
252 	return 0;
253 }
254 
255 static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
256 {
257 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
258 	int bar = sndev->peer_direct_mw_to_bar[idx];
259 	u32 ctl_val;
260 
261 	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
262 	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
263 	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
264 	iowrite32(0, &ctl->bar_entry[bar].win_size);
265 	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
266 }
267 
268 static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
269 {
270 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
271 
272 	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
273 }
274 
275 static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
276 					dma_addr_t addr, resource_size_t size)
277 {
278 	int xlate_pos = ilog2(size);
279 	int bar = sndev->peer_direct_mw_to_bar[idx];
280 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
281 	u32 ctl_val;
282 
283 	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
284 	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
285 
286 	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
287 	iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
288 	iowrite64(sndev->self_partition | addr,
289 		  &ctl->bar_entry[bar].xlate_addr);
290 }
291 
292 static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
293 				     dma_addr_t addr, resource_size_t size)
294 {
295 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
296 
297 	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
298 		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
299 }
300 
301 static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
302 				      dma_addr_t addr, resource_size_t size)
303 {
304 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
305 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
306 	int xlate_pos = ilog2(size);
307 	int nr_direct_mw = sndev->peer_nr_direct_mw;
308 	int rc;
309 
310 	if (pidx != NTB_DEF_PEER_IDX)
311 		return -EINVAL;
312 
313 	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
314 		widx, pidx, &addr, &size);
315 
316 	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
317 		return -EINVAL;
318 
319 	if (xlate_pos < 12)
320 		return -EINVAL;
321 
322 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
323 				   NTB_CTRL_PART_STATUS_LOCKED);
324 	if (rc)
325 		return rc;
326 
327 	if (addr == 0 || size == 0) {
328 		if (widx < nr_direct_mw)
329 			switchtec_ntb_mw_clr_direct(sndev, widx);
330 		else
331 			switchtec_ntb_mw_clr_lut(sndev, widx);
332 	} else {
333 		if (widx < nr_direct_mw)
334 			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
335 		else
336 			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
337 	}
338 
339 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
340 				   NTB_CTRL_PART_STATUS_NORMAL);
341 
342 	if (rc == -EIO) {
343 		dev_err(&sndev->stdev->dev,
344 			"Hardware reported an error configuring mw %d: %08x\n",
345 			widx, ioread32(&ctl->bar_error));
346 
347 		if (widx < nr_direct_mw)
348 			switchtec_ntb_mw_clr_direct(sndev, widx);
349 		else
350 			switchtec_ntb_mw_clr_lut(sndev, widx);
351 
352 		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
353 				      NTB_CTRL_PART_STATUS_NORMAL);
354 	}
355 
356 	return rc;
357 }
358 
359 static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
360 {
361 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
362 	int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;
363 
364 	return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
365 }
366 
367 static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
368 					 int idx, phys_addr_t *base,
369 					 resource_size_t *size)
370 {
371 	int bar = sndev->direct_mw_to_bar[idx];
372 	size_t offset = 0;
373 
374 	if (bar < 0)
375 		return -EINVAL;
376 
377 	if (idx == 0) {
378 		/*
379 		 * This is the direct BAR shared with the LUTs
380 		 * which means the actual window will be offset
381 		 * by the size of all the LUT entries.
382 		 */
383 
384 		offset = LUT_SIZE * sndev->nr_lut_mw;
385 	}
386 
387 	if (base)
388 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
389 
390 	if (size) {
391 		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
392 		if (offset && *size > offset)
393 			*size = offset;
394 
395 		if (*size > max_mw_size)
396 			*size = max_mw_size;
397 	}
398 
399 	return 0;
400 }
401 
402 static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
403 				      int idx, phys_addr_t *base,
404 				      resource_size_t *size)
405 {
406 	int bar = sndev->direct_mw_to_bar[0];
407 	int offset;
408 
409 	offset = LUT_SIZE * lut_index(sndev, idx);
410 
411 	if (base)
412 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
413 
414 	if (size)
415 		*size = LUT_SIZE;
416 
417 	return 0;
418 }
419 
420 static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
421 					  phys_addr_t *base,
422 					  resource_size_t *size)
423 {
424 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
425 
426 	if (idx < sndev->nr_direct_mw)
427 		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
428 	else if (idx < switchtec_ntb_peer_mw_count(ntb))
429 		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
430 	else
431 		return -EINVAL;
432 }
433 
434 static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
435 					  int partition,
436 					  enum ntb_speed *speed,
437 					  enum ntb_width *width)
438 {
439 	struct switchtec_dev *stdev = sndev->stdev;
440 
441 	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
442 	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
443 
444 	if (speed)
445 		*speed = (linksta >> 16) & 0xF;
446 
447 	if (width)
448 		*width = (linksta >> 20) & 0x3F;
449 }
450 
451 static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
452 {
453 	enum ntb_speed self_speed, peer_speed;
454 	enum ntb_width self_width, peer_width;
455 
456 	if (!sndev->link_is_up) {
457 		sndev->link_speed = NTB_SPEED_NONE;
458 		sndev->link_width = NTB_WIDTH_NONE;
459 		return;
460 	}
461 
462 	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
463 				      &self_speed, &self_width);
464 	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
465 				      &peer_speed, &peer_width);
466 
467 	sndev->link_speed = min(self_speed, peer_speed);
468 	sndev->link_width = min(self_width, peer_width);
469 }
470 
471 static int crosslink_is_enabled(struct switchtec_ntb *sndev)
472 {
473 	struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;
474 
475 	return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
476 }
477 
478 static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev)
479 {
480 	int i;
481 	u32 msg_map = 0;
482 
483 	if (!crosslink_is_enabled(sndev))
484 		return;
485 
486 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) {
487 		int m = i | sndev->self_partition << 2;
488 
489 		msg_map |= m << i * 8;
490 	}
491 
492 	iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map);
493 	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
494 		  &sndev->mmio_peer_dbmsg->odb_mask);
495 }
496 
497 enum {
498 	LINK_MESSAGE = 0,
499 	MSG_LINK_UP = 1,
500 	MSG_LINK_DOWN = 2,
501 	MSG_CHECK_LINK = 3,
502 };
503 
504 static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
505 {
506 	int link_sta;
507 	int old = sndev->link_is_up;
508 
509 	link_sta = sndev->self_shared->link_sta;
510 	if (link_sta) {
511 		u64 peer = ioread64(&sndev->peer_shared->magic);
512 
513 		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
514 			link_sta = peer >> 32;
515 		else
516 			link_sta = 0;
517 	}
518 
519 	sndev->link_is_up = link_sta;
520 	switchtec_ntb_set_link_speed(sndev);
521 
522 	if (link_sta != old) {
523 		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
524 		ntb_link_event(&sndev->ntb);
525 		dev_info(&sndev->stdev->dev, "ntb link %s\n",
526 			 link_sta ? "up" : "down");
527 
528 		if (link_sta)
529 			crosslink_init_dbmsgs(sndev);
530 	}
531 }
532 
533 static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
534 {
535 	struct switchtec_ntb *sndev = stdev->sndev;
536 
537 	switchtec_ntb_check_link(sndev);
538 }
539 
540 static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
541 				    enum ntb_speed *speed,
542 				    enum ntb_width *width)
543 {
544 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
545 
546 	if (speed)
547 		*speed = sndev->link_speed;
548 	if (width)
549 		*width = sndev->link_width;
550 
551 	return sndev->link_is_up;
552 }
553 
554 static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
555 				     enum ntb_speed max_speed,
556 				     enum ntb_width max_width)
557 {
558 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
559 
560 	dev_dbg(&sndev->stdev->dev, "enabling link\n");
561 
562 	sndev->self_shared->link_sta = 1;
563 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
564 
565 	switchtec_ntb_check_link(sndev);
566 
567 	return 0;
568 }
569 
570 static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
571 {
572 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
573 
574 	dev_dbg(&sndev->stdev->dev, "disabling link\n");
575 
576 	sndev->self_shared->link_sta = 0;
577 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
578 
579 	switchtec_ntb_check_link(sndev);
580 
581 	return 0;
582 }
583 
584 static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
585 {
586 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
587 
588 	return sndev->db_valid_mask;
589 }
590 
591 static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
592 {
593 	return 1;
594 }
595 
596 static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
597 {
598 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
599 
600 	if (db_vector < 0 || db_vector > 1)
601 		return 0;
602 
603 	return sndev->db_valid_mask;
604 }
605 
606 static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
607 {
608 	u64 ret;
609 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
610 
611 	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
612 
613 	return ret & sndev->db_valid_mask;
614 }
615 
616 static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
617 {
618 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
619 
620 	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
621 
622 	return 0;
623 }
624 
625 static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
626 {
627 	unsigned long irqflags;
628 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
629 
630 	if (db_bits & ~sndev->db_valid_mask)
631 		return -EINVAL;
632 
633 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
634 
635 	sndev->db_mask |= db_bits << sndev->db_shift;
636 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
637 
638 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
639 
640 	return 0;
641 }
642 
643 static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
644 {
645 	unsigned long irqflags;
646 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
647 
648 	if (db_bits & ~sndev->db_valid_mask)
649 		return -EINVAL;
650 
651 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
652 
653 	sndev->db_mask &= ~(db_bits << sndev->db_shift);
654 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
655 
656 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
657 
658 	return 0;
659 }
660 
661 static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
662 {
663 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
664 
665 	return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
666 }
667 
668 static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
669 				      phys_addr_t *db_addr,
670 				      resource_size_t *db_size)
671 {
672 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
673 	unsigned long offset;
674 
675 	offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
676 		(unsigned long)sndev->stdev->mmio;
677 
678 	offset += sndev->db_shift / 8;
679 
680 	if (db_addr)
681 		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
682 	if (db_size)
683 		*db_size = sizeof(u32);
684 
685 	return 0;
686 }
687 
688 static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
689 {
690 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
691 
692 	iowrite64(db_bits << sndev->db_peer_shift,
693 		  &sndev->mmio_peer_dbmsg->odb);
694 
695 	return 0;
696 }
697 
698 static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
699 {
700 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
701 
702 	return ARRAY_SIZE(sndev->self_shared->spad);
703 }
704 
705 static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
706 {
707 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
708 
709 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
710 		return 0;
711 
712 	if (!sndev->self_shared)
713 		return 0;
714 
715 	return sndev->self_shared->spad[idx];
716 }
717 
718 static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
719 {
720 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
721 
722 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
723 		return -EINVAL;
724 
725 	if (!sndev->self_shared)
726 		return -EIO;
727 
728 	sndev->self_shared->spad[idx] = val;
729 
730 	return 0;
731 }
732 
733 static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
734 					int sidx)
735 {
736 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
737 
738 	if (pidx != NTB_DEF_PEER_IDX)
739 		return -EINVAL;
740 
741 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
742 		return 0;
743 
744 	if (!sndev->peer_shared)
745 		return 0;
746 
747 	return ioread32(&sndev->peer_shared->spad[sidx]);
748 }
749 
750 static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
751 					 int sidx, u32 val)
752 {
753 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
754 
755 	if (pidx != NTB_DEF_PEER_IDX)
756 		return -EINVAL;
757 
758 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
759 		return -EINVAL;
760 
761 	if (!sndev->peer_shared)
762 		return -EIO;
763 
764 	iowrite32(val, &sndev->peer_shared->spad[sidx]);
765 
766 	return 0;
767 }
768 
769 static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
770 					int sidx, phys_addr_t *spad_addr)
771 {
772 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
773 	unsigned long offset;
774 
775 	if (pidx != NTB_DEF_PEER_IDX)
776 		return -EINVAL;
777 
778 	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
779 		(unsigned long)sndev->stdev->mmio;
780 
781 	if (spad_addr)
782 		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
783 
784 	return 0;
785 }
786 
787 static const struct ntb_dev_ops switchtec_ntb_ops = {
788 	.mw_count		= switchtec_ntb_mw_count,
789 	.mw_get_align		= switchtec_ntb_mw_get_align,
790 	.mw_set_trans		= switchtec_ntb_mw_set_trans,
791 	.peer_mw_count		= switchtec_ntb_peer_mw_count,
792 	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
793 	.link_is_up		= switchtec_ntb_link_is_up,
794 	.link_enable		= switchtec_ntb_link_enable,
795 	.link_disable		= switchtec_ntb_link_disable,
796 	.db_valid_mask		= switchtec_ntb_db_valid_mask,
797 	.db_vector_count	= switchtec_ntb_db_vector_count,
798 	.db_vector_mask		= switchtec_ntb_db_vector_mask,
799 	.db_read		= switchtec_ntb_db_read,
800 	.db_clear		= switchtec_ntb_db_clear,
801 	.db_set_mask		= switchtec_ntb_db_set_mask,
802 	.db_clear_mask		= switchtec_ntb_db_clear_mask,
803 	.db_read_mask		= switchtec_ntb_db_read_mask,
804 	.peer_db_addr		= switchtec_ntb_peer_db_addr,
805 	.peer_db_set		= switchtec_ntb_peer_db_set,
806 	.spad_count		= switchtec_ntb_spad_count,
807 	.spad_read		= switchtec_ntb_spad_read,
808 	.spad_write		= switchtec_ntb_spad_write,
809 	.peer_spad_read		= switchtec_ntb_peer_spad_read,
810 	.peer_spad_write	= switchtec_ntb_peer_spad_write,
811 	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
812 };
813 
814 static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
815 {
816 	u64 tpart_vec;
817 	int self;
818 	u64 part_map;
819 	int bit;
820 
821 	sndev->ntb.pdev = sndev->stdev->pdev;
822 	sndev->ntb.topo = NTB_TOPO_SWITCH;
823 	sndev->ntb.ops = &switchtec_ntb_ops;
824 
825 	sndev->self_partition = sndev->stdev->partition;
826 
827 	sndev->mmio_ntb = sndev->stdev->mmio_ntb;
828 
829 	self = sndev->self_partition;
830 	tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
831 	tpart_vec <<= 32;
832 	tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
833 
834 	part_map = ioread64(&sndev->mmio_ntb->ep_map);
835 	part_map &= ~(1 << sndev->self_partition);
836 
837 	if (!ffs(tpart_vec)) {
838 		if (sndev->stdev->partition_count != 2) {
839 			dev_err(&sndev->stdev->dev,
840 				"ntb target partition not defined\n");
841 			return -ENODEV;
842 		}
843 
844 		bit = ffs(part_map);
845 		if (!bit) {
846 			dev_err(&sndev->stdev->dev,
847 				"peer partition is not NT partition\n");
848 			return -ENODEV;
849 		}
850 
851 		sndev->peer_partition = bit - 1;
852 	} else {
853 		if (ffs(tpart_vec) != fls(tpart_vec)) {
854 			dev_err(&sndev->stdev->dev,
855 				"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
856 			return -ENODEV;
857 		}
858 
859 		sndev->peer_partition = ffs(tpart_vec) - 1;
860 		if (!(part_map && (1 << sndev->peer_partition))) {
861 			dev_err(&sndev->stdev->dev,
862 				"ntb target partition is not NT partition\n");
863 			return -ENODEV;
864 		}
865 	}
866 
867 	dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
868 		sndev->self_partition, sndev->stdev->partition_count);
869 
870 	sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
871 		SWITCHTEC_NTB_REG_CTRL_OFFSET;
872 	sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
873 		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
874 
875 	sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
876 	sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
877 	sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
878 	sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg;
879 
880 	return 0;
881 }
882 
883 static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
884 			       struct ntb_ctrl_regs __iomem *ctl,
885 			       int lut_idx, int partition, u64 addr)
886 {
887 	int peer_bar = sndev->peer_direct_mw_to_bar[0];
888 	u32 ctl_val;
889 	int rc;
890 
891 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
892 				   NTB_CTRL_PART_STATUS_LOCKED);
893 	if (rc)
894 		return rc;
895 
896 	ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
897 	ctl_val &= 0xFF;
898 	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
899 	ctl_val |= ilog2(LUT_SIZE) << 8;
900 	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
901 	iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);
902 
903 	iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
904 		  &ctl->lut_entry[lut_idx]);
905 
906 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
907 				   NTB_CTRL_PART_STATUS_NORMAL);
908 	if (rc) {
909 		u32 bar_error, lut_error;
910 
911 		bar_error = ioread32(&ctl->bar_error);
912 		lut_error = ioread32(&ctl->lut_error);
913 		dev_err(&sndev->stdev->dev,
914 			"Error setting up reserved lut window: %08x / %08x\n",
915 			bar_error, lut_error);
916 		return rc;
917 	}
918 
919 	return 0;
920 }
921 
922 static int config_req_id_table(struct switchtec_ntb *sndev,
923 			       struct ntb_ctrl_regs __iomem *mmio_ctrl,
924 			       int *req_ids, int count)
925 {
926 	int i, rc = 0;
927 	u32 error;
928 	u32 proxy_id;
929 
930 	if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
931 		dev_err(&sndev->stdev->dev,
932 			"Not enough requester IDs available.\n");
933 		return -EFAULT;
934 	}
935 
936 	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
937 				   NTB_CTRL_PART_OP_LOCK,
938 				   NTB_CTRL_PART_STATUS_LOCKED);
939 	if (rc)
940 		return rc;
941 
942 	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
943 		  &mmio_ctrl->partition_ctrl);
944 
945 	for (i = 0; i < count; i++) {
946 		iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
947 			  &mmio_ctrl->req_id_table[i]);
948 
949 		proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
950 		dev_dbg(&sndev->stdev->dev,
951 			"Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
952 			req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
953 			req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
954 			(proxy_id >> 1) & 0x7);
955 	}
956 
957 	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
958 				   NTB_CTRL_PART_OP_CFG,
959 				   NTB_CTRL_PART_STATUS_NORMAL);
960 
961 	if (rc == -EIO) {
962 		error = ioread32(&mmio_ctrl->req_id_error);
963 		dev_err(&sndev->stdev->dev,
964 			"Error setting up the requester ID table: %08x\n",
965 			error);
966 	}
967 
968 	return 0;
969 }
970 
971 static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
972 			       u64 *mw_addrs, int mw_count)
973 {
974 	int rc, i;
975 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl;
976 	u64 addr;
977 	size_t size, offset;
978 	int bar;
979 	int xlate_pos;
980 	u32 ctl_val;
981 
982 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
983 				   NTB_CTRL_PART_STATUS_LOCKED);
984 	if (rc)
985 		return rc;
986 
987 	for (i = 0; i < sndev->nr_lut_mw; i++) {
988 		if (i == ntb_lut_idx)
989 			continue;
990 
991 		addr = mw_addrs[0] + LUT_SIZE * i;
992 
993 		iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) |
994 			   addr),
995 			  &ctl->lut_entry[i]);
996 	}
997 
998 	sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count);
999 
1000 	for (i = 0; i < sndev->nr_direct_mw; i++) {
1001 		bar = sndev->direct_mw_to_bar[i];
1002 		offset = (i == 0) ? LUT_SIZE * sndev->nr_lut_mw : 0;
1003 		addr = mw_addrs[i] + offset;
1004 		size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
1005 		xlate_pos = ilog2(size);
1006 
1007 		if (offset && size > offset)
1008 			size = offset;
1009 
1010 		ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
1011 		ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
1012 
1013 		iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
1014 		iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
1015 		iowrite64(sndev->peer_partition | addr,
1016 			  &ctl->bar_entry[bar].xlate_addr);
1017 	}
1018 
1019 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
1020 				   NTB_CTRL_PART_STATUS_NORMAL);
1021 	if (rc) {
1022 		u32 bar_error, lut_error;
1023 
1024 		bar_error = ioread32(&ctl->bar_error);
1025 		lut_error = ioread32(&ctl->lut_error);
1026 		dev_err(&sndev->stdev->dev,
1027 			"Error setting up cross link windows: %08x / %08x\n",
1028 			bar_error, lut_error);
1029 		return rc;
1030 	}
1031 
1032 	return 0;
1033 }
1034 
1035 static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
1036 	struct ntb_ctrl_regs __iomem *mmio_ctrl)
1037 {
1038 	int req_ids[16];
1039 	int i;
1040 	u32 proxy_id;
1041 
1042 	for (i = 0; i < ARRAY_SIZE(req_ids); i++) {
1043 		proxy_id = ioread32(&sndev->mmio_self_ctrl->req_id_table[i]);
1044 
1045 		if (!(proxy_id & NTB_CTRL_REQ_ID_EN))
1046 			break;
1047 
1048 		req_ids[i] = ((proxy_id >> 1) & 0xFF);
1049 	}
1050 
1051 	return config_req_id_table(sndev, mmio_ctrl, req_ids, i);
1052 }
1053 
1054 /*
1055  * In crosslink configuration there is a virtual partition in the
1056  * middle of the two switches. The BARs in this partition have to be
1057  * enumerated and assigned addresses.
1058  */
1059 static int crosslink_enum_partition(struct switchtec_ntb *sndev,
1060 				    u64 *bar_addrs)
1061 {
1062 	struct part_cfg_regs __iomem *part_cfg =
1063 		&sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
1064 	u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
1065 	struct pff_csr_regs __iomem *mmio_pff =
1066 		&sndev->stdev->mmio_pff_csr[pff];
1067 	const u64 bar_space = 0x1000000000LL;
1068 	u64 bar_addr;
1069 	int bar_cnt = 0;
1070 	int i;
1071 
1072 	iowrite16(0x6, &mmio_pff->pcicmd);
1073 
1074 	for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
1075 		iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
1076 		bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
1077 		bar_addr &= ~0xf;
1078 
1079 		dev_dbg(&sndev->stdev->dev,
1080 			"Crosslink BAR%d addr: %llx\n",
1081 			i, bar_addr);
1082 
1083 		if (bar_addr != bar_space * i)
1084 			continue;
1085 
1086 		bar_addrs[bar_cnt++] = bar_addr;
1087 	}
1088 
1089 	return bar_cnt;
1090 }
1091 
1092 static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
1093 {
1094 	int rc;
1095 	int bar = sndev->direct_mw_to_bar[0];
1096 	const int ntb_lut_idx = 1;
1097 	u64 bar_addrs[6];
1098 	u64 addr;
1099 	int offset;
1100 	int bar_cnt;
1101 
1102 	if (!crosslink_is_enabled(sndev))
1103 		return 0;
1104 
1105 	dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
1106 	sndev->ntb.topo = NTB_TOPO_CROSSLINK;
1107 
1108 	bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
1109 	if (bar_cnt < sndev->nr_direct_mw + 1) {
1110 		dev_err(&sndev->stdev->dev,
1111 			"Error enumerating crosslink partition\n");
1112 		return -EINVAL;
1113 	}
1114 
1115 	addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET +
1116 		SWITCHTEC_NTB_REG_DBMSG_OFFSET +
1117 		sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition);
1118 
1119 	offset = addr & (LUT_SIZE - 1);
1120 	addr -= offset;
1121 
1122 	rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
1123 				 sndev->peer_partition, addr);
1124 	if (rc)
1125 		return rc;
1126 
1127 	rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
1128 				 bar_cnt - 1);
1129 	if (rc)
1130 		return rc;
1131 
1132 	rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
1133 	if (rc)
1134 		return rc;
1135 
1136 	sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
1137 						LUT_SIZE, LUT_SIZE);
1138 	if (!sndev->mmio_xlink_win) {
1139 		rc = -ENOMEM;
1140 		return rc;
1141 	}
1142 
1143 	sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
1144 	sndev->nr_rsvd_luts++;
1145 
1146 	crosslink_init_dbmsgs(sndev);
1147 
1148 	return 0;
1149 }
1150 
1151 static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
1152 {
1153 	if (sndev->mmio_xlink_win)
1154 		pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
1155 }
1156 
1157 static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
1158 {
1159 	int i;
1160 	int cnt = 0;
1161 
1162 	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
1163 		u32 r = ioread32(&ctrl->bar_entry[i].ctl);
1164 
1165 		if (r & NTB_CTRL_BAR_VALID)
1166 			map[cnt++] = i;
1167 	}
1168 
1169 	return cnt;
1170 }
1171 
1172 static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
1173 {
1174 	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
1175 				       sndev->mmio_self_ctrl);
1176 
1177 	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
1178 	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
1179 
1180 	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
1181 		sndev->nr_direct_mw, sndev->nr_lut_mw);
1182 
1183 	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
1184 					    sndev->mmio_peer_ctrl);
1185 
1186 	sndev->peer_nr_lut_mw =
1187 		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
1188 	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
1189 
1190 	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
1191 		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
1192 
1193 }
1194 
1195 /*
1196  * There are 64 doorbells in the switch hardware but this is
1197  * shared among all partitions. So we must split them in half
1198  * (32 for each partition). However, the message interrupts are
1199  * also shared with the top 4 doorbells so we just limit this to
1200  * 28 doorbells per partition.
1201  *
1202  * In crosslink mode, each side has it's own dbmsg register so
1203  * they can each use all 60 of the available doorbells.
1204  */
1205 static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
1206 {
1207 	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
1208 
1209 	if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) {
1210 		sndev->db_shift = 0;
1211 		sndev->db_peer_shift = 0;
1212 		sndev->db_valid_mask = sndev->db_mask;
1213 	} else if (sndev->self_partition < sndev->peer_partition) {
1214 		sndev->db_shift = 0;
1215 		sndev->db_peer_shift = 32;
1216 		sndev->db_valid_mask = 0x0FFFFFFF;
1217 	} else {
1218 		sndev->db_shift = 32;
1219 		sndev->db_peer_shift = 0;
1220 		sndev->db_valid_mask = 0x0FFFFFFF;
1221 	}
1222 
1223 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
1224 	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
1225 		  &sndev->mmio_peer_dbmsg->odb_mask);
1226 
1227 	dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n",
1228 		sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask);
1229 }
1230 
1231 static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
1232 {
1233 	int i;
1234 	u32 msg_map = 0;
1235 
1236 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1237 		int m = i | sndev->peer_partition << 2;
1238 
1239 		msg_map |= m << i * 8;
1240 	}
1241 
1242 	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
1243 
1244 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
1245 		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
1246 			  &sndev->mmio_self_dbmsg->imsg[i]);
1247 }
1248 
1249 static int
1250 switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
1251 {
1252 	int req_ids[2];
1253 
1254 	/*
1255 	 * Root Complex Requester ID (which is 0:00.0)
1256 	 */
1257 	req_ids[0] = 0;
1258 
1259 	/*
1260 	 * Host Bridge Requester ID (as read from the mmap address)
1261 	 */
1262 	req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);
1263 
1264 	return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
1265 				   ARRAY_SIZE(req_ids));
1266 }
1267 
1268 static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
1269 {
1270 	int i;
1271 
1272 	memset(sndev->self_shared, 0, LUT_SIZE);
1273 	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
1274 	sndev->self_shared->partition_id = sndev->stdev->partition;
1275 
1276 	for (i = 0; i < sndev->nr_direct_mw; i++) {
1277 		int bar = sndev->direct_mw_to_bar[i];
1278 		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
1279 
1280 		if (i == 0)
1281 			sz = min_t(resource_size_t, sz,
1282 				   LUT_SIZE * sndev->nr_lut_mw);
1283 
1284 		sndev->self_shared->mw_sizes[i] = sz;
1285 	}
1286 
1287 	for (i = 0; i < sndev->nr_lut_mw; i++) {
1288 		int idx = sndev->nr_direct_mw + i;
1289 
1290 		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
1291 	}
1292 }
1293 
1294 static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
1295 {
1296 	int self_bar = sndev->direct_mw_to_bar[0];
1297 	int rc;
1298 
1299 	sndev->nr_rsvd_luts++;
1300 	sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
1301 						 LUT_SIZE,
1302 						 &sndev->self_shared_dma,
1303 						 GFP_KERNEL);
1304 	if (!sndev->self_shared) {
1305 		dev_err(&sndev->stdev->dev,
1306 			"unable to allocate memory for shared mw\n");
1307 		return -ENOMEM;
1308 	}
1309 
1310 	switchtec_ntb_init_shared(sndev);
1311 
1312 	rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
1313 				 sndev->self_partition,
1314 				 sndev->self_shared_dma);
1315 	if (rc)
1316 		goto unalloc_and_exit;
1317 
1318 	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
1319 	if (!sndev->peer_shared) {
1320 		rc = -ENOMEM;
1321 		goto unalloc_and_exit;
1322 	}
1323 
1324 	dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
1325 	return 0;
1326 
1327 unalloc_and_exit:
1328 	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1329 			  sndev->self_shared, sndev->self_shared_dma);
1330 
1331 	return rc;
1332 }
1333 
1334 static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
1335 {
1336 	if (sndev->peer_shared)
1337 		pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
1338 
1339 	if (sndev->self_shared)
1340 		dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1341 				  sndev->self_shared,
1342 				  sndev->self_shared_dma);
1343 	sndev->nr_rsvd_luts--;
1344 }
1345 
1346 static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
1347 {
1348 	struct switchtec_ntb *sndev = dev;
1349 
1350 	dev_dbg(&sndev->stdev->dev, "doorbell\n");
1351 
1352 	ntb_db_event(&sndev->ntb, 0);
1353 
1354 	return IRQ_HANDLED;
1355 }
1356 
1357 static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
1358 {
1359 	int i;
1360 	struct switchtec_ntb *sndev = dev;
1361 
1362 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1363 		u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
1364 
1365 		if (msg & NTB_DBMSG_IMSG_STATUS) {
1366 			dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
1367 				i, (u32)msg);
1368 			iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
1369 
1370 			if (i == LINK_MESSAGE)
1371 				switchtec_ntb_check_link(sndev);
1372 		}
1373 	}
1374 
1375 	return IRQ_HANDLED;
1376 }
1377 
1378 static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
1379 {
1380 	int i;
1381 	int rc;
1382 	int doorbell_irq = 0;
1383 	int message_irq = 0;
1384 	int event_irq;
1385 	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);
1386 
1387 	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);
1388 
1389 	while (doorbell_irq == event_irq)
1390 		doorbell_irq++;
1391 	while (message_irq == doorbell_irq ||
1392 	       message_irq == event_irq)
1393 		message_irq++;
1394 
1395 	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
1396 		event_irq, doorbell_irq, message_irq);
1397 
1398 	for (i = 0; i < idb_vecs - 4; i++)
1399 		iowrite8(doorbell_irq,
1400 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1401 
1402 	for (; i < idb_vecs; i++)
1403 		iowrite8(message_irq,
1404 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1405 
1406 	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
1407 	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);
1408 
1409 	rc = request_irq(sndev->doorbell_irq,
1410 			 switchtec_ntb_doorbell_isr, 0,
1411 			 "switchtec_ntb_doorbell", sndev);
1412 	if (rc)
1413 		return rc;
1414 
1415 	rc = request_irq(sndev->message_irq,
1416 			 switchtec_ntb_message_isr, 0,
1417 			 "switchtec_ntb_message", sndev);
1418 	if (rc) {
1419 		free_irq(sndev->doorbell_irq, sndev);
1420 		return rc;
1421 	}
1422 
1423 	return 0;
1424 }
1425 
1426 static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
1427 {
1428 	free_irq(sndev->doorbell_irq, sndev);
1429 	free_irq(sndev->message_irq, sndev);
1430 }
1431 
1432 static int switchtec_ntb_add(struct device *dev,
1433 			     struct class_interface *class_intf)
1434 {
1435 	struct switchtec_dev *stdev = to_stdev(dev);
1436 	struct switchtec_ntb *sndev;
1437 	int rc;
1438 
1439 	stdev->sndev = NULL;
1440 
1441 	if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
1442 		return -ENODEV;
1443 
1444 	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
1445 	if (!sndev)
1446 		return -ENOMEM;
1447 
1448 	sndev->stdev = stdev;
1449 	rc = switchtec_ntb_init_sndev(sndev);
1450 	if (rc)
1451 		goto free_and_exit;
1452 
1453 	switchtec_ntb_init_mw(sndev);
1454 
1455 	rc = switchtec_ntb_init_req_id_table(sndev);
1456 	if (rc)
1457 		goto free_and_exit;
1458 
1459 	rc = switchtec_ntb_init_crosslink(sndev);
1460 	if (rc)
1461 		goto free_and_exit;
1462 
1463 	switchtec_ntb_init_db(sndev);
1464 	switchtec_ntb_init_msgs(sndev);
1465 
1466 	rc = switchtec_ntb_init_shared_mw(sndev);
1467 	if (rc)
1468 		goto deinit_crosslink;
1469 
1470 	rc = switchtec_ntb_init_db_msg_irq(sndev);
1471 	if (rc)
1472 		goto deinit_shared_and_exit;
1473 
1474 	rc = ntb_register_device(&sndev->ntb);
1475 	if (rc)
1476 		goto deinit_and_exit;
1477 
1478 	stdev->sndev = sndev;
1479 	stdev->link_notifier = switchtec_ntb_link_notification;
1480 	dev_info(dev, "NTB device registered\n");
1481 
1482 	return 0;
1483 
1484 deinit_and_exit:
1485 	switchtec_ntb_deinit_db_msg_irq(sndev);
1486 deinit_shared_and_exit:
1487 	switchtec_ntb_deinit_shared_mw(sndev);
1488 deinit_crosslink:
1489 	switchtec_ntb_deinit_crosslink(sndev);
1490 free_and_exit:
1491 	kfree(sndev);
1492 	dev_err(dev, "failed to register ntb device: %d\n", rc);
1493 	return rc;
1494 }
1495 
1496 void switchtec_ntb_remove(struct device *dev,
1497 			  struct class_interface *class_intf)
1498 {
1499 	struct switchtec_dev *stdev = to_stdev(dev);
1500 	struct switchtec_ntb *sndev = stdev->sndev;
1501 
1502 	if (!sndev)
1503 		return;
1504 
1505 	stdev->link_notifier = NULL;
1506 	stdev->sndev = NULL;
1507 	ntb_unregister_device(&sndev->ntb);
1508 	switchtec_ntb_deinit_db_msg_irq(sndev);
1509 	switchtec_ntb_deinit_shared_mw(sndev);
1510 	switchtec_ntb_deinit_crosslink(sndev);
1511 	kfree(sndev);
1512 	dev_info(dev, "ntb device unregistered\n");
1513 }
1514 
1515 static struct class_interface switchtec_interface  = {
1516 	.add_dev = switchtec_ntb_add,
1517 	.remove_dev = switchtec_ntb_remove,
1518 };
1519 
1520 static int __init switchtec_ntb_init(void)
1521 {
1522 	switchtec_interface.class = switchtec_class;
1523 	return class_interface_register(&switchtec_interface);
1524 }
1525 module_init(switchtec_ntb_init);
1526 
1527 static void __exit switchtec_ntb_exit(void)
1528 {
1529 	class_interface_unregister(&switchtec_interface);
1530 }
1531 module_exit(switchtec_ntb_exit);
1532