xref: /linux/drivers/ntb/hw/mscc/ntb_hw_switchtec.c (revision b9a4acac282eff60cba800bdbc5a3b57c33c10be)
1 /*
2  * Microsemi Switchtec(tm) PCIe Management Driver
3  * Copyright (c) 2017, Microsemi Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  */
15 
16 #include <linux/switchtec.h>
17 #include <linux/module.h>
18 #include <linux/delay.h>
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21 #include <linux/ntb.h>
22 
23 MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
24 MODULE_VERSION("0.1");
25 MODULE_LICENSE("GPL");
26 MODULE_AUTHOR("Microsemi Corporation");
27 
28 static bool use_lut_mws;
29 module_param(use_lut_mws, bool, 0644);
30 MODULE_PARM_DESC(use_lut_mws,
31 		 "Enable the use of the LUT based memory windows");
32 
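/*
 * Fallback 64-bit MMIO accessors for configurations that lack readq/writeq
 * (e.g. 32-bit builds): each access is split into two 32-bit operations,
 * low word first.
 */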
33 #ifndef ioread64
34 #ifdef readq
35 #define ioread64 readq
36 #else
37 #define ioread64 _ioread64
38 static inline u64 _ioread64(void __iomem *mmio)
39 {
40 	u64 low, high;
41 
42 	low = ioread32(mmio);
43 	high = ioread32(mmio + sizeof(u32));
44 	return low | (high << 32);
45 }
46 #endif
47 #endif
48 
49 #ifndef iowrite64
50 #ifdef writeq
51 #define iowrite64 writeq
52 #else
53 #define iowrite64 _iowrite64
54 static inline void _iowrite64(u64 val, void __iomem *mmio)
55 {
56 	iowrite32(val, mmio);
57 	iowrite32(val >> 32, mmio + sizeof(u32));
58 }
59 #endif
60 #endif
61 
62 #define SWITCHTEC_NTB_MAGIC 0x45CC0001
63 #define MAX_MWS     128
64 
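/*
 * Layout of the shared memory window each side exports to its peer: a
 * magic number and link status word (read together as one 64-bit value),
 * the owning partition ID, the advertised size of every memory window
 * and the scratchpad registers.
 */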
65 struct shared_mw {
66 	u32 magic;
67 	u32 link_sta;
68 	u32 partition_id;
69 	u64 mw_sizes[MAX_MWS];
70 	u32 spad[128];
71 };
72 
73 #define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
74 #define LUT_SIZE SZ_64K
75 
76 struct switchtec_ntb {
77 	struct ntb_dev ntb;
78 	struct switchtec_dev *stdev;
79 
80 	int self_partition;
81 	int peer_partition;
82 
83 	int doorbell_irq;
84 	int message_irq;
85 
86 	struct ntb_info_regs __iomem *mmio_ntb;
87 	struct ntb_ctrl_regs __iomem *mmio_ctrl;
88 	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
89 	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
90 	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
91 	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
92 
93 	struct shared_mw *self_shared;
94 	struct shared_mw __iomem *peer_shared;
95 	dma_addr_t self_shared_dma;
96 
97 	u64 db_mask;
98 	u64 db_valid_mask;
99 	int db_shift;
100 	int db_peer_shift;
101 
102 	/* synchronize rmw access of db_mask and hw reg */
103 	spinlock_t db_mask_lock;
104 
105 	int nr_direct_mw;
106 	int nr_lut_mw;
107 	int direct_mw_to_bar[MAX_DIRECT_MW];
108 
109 	int peer_nr_direct_mw;
110 	int peer_nr_lut_mw;
111 	int peer_direct_mw_to_bar[MAX_DIRECT_MW];
112 
113 	bool link_is_up;
114 	enum ntb_speed link_speed;
115 	enum ntb_width link_width;
116 };
117 
118 static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
119 {
120 	return container_of(ntb, struct switchtec_ntb, ntb);
121 }
122 
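/*
 * Start a partition controller operation (lock, configure or reset) and
 * poll the status register until it leaves the corresponding transitional
 * state, for up to roughly 50 seconds. Returns 0 only if the controller
 * ends up in the expected wait_status.
 */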
123 static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
124 				 struct ntb_ctrl_regs __iomem *ctl,
125 				 u32 op, int wait_status)
126 {
127 	static const char * const op_text[] = {
128 		[NTB_CTRL_PART_OP_LOCK] = "lock",
129 		[NTB_CTRL_PART_OP_CFG] = "configure",
130 		[NTB_CTRL_PART_OP_RESET] = "reset",
131 	};
132 
133 	int i;
134 	u32 ps;
135 	int status;
136 
137 	switch (op) {
138 	case NTB_CTRL_PART_OP_LOCK:
139 		status = NTB_CTRL_PART_STATUS_LOCKING;
140 		break;
141 	case NTB_CTRL_PART_OP_CFG:
142 		status = NTB_CTRL_PART_STATUS_CONFIGURING;
143 		break;
144 	case NTB_CTRL_PART_OP_RESET:
145 		status = NTB_CTRL_PART_STATUS_RESETTING;
146 		break;
147 	default:
148 		return -EINVAL;
149 	}
150 
151 	iowrite32(op, &ctl->partition_op);
152 
153 	for (i = 0; i < 1000; i++) {
154 		if (msleep_interruptible(50) != 0) {
155 			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
156 			return -EINTR;
157 		}
158 
159 		ps = ioread32(&ctl->partition_status) & 0xFFFF;
160 
161 		if (ps != status)
162 			break;
163 	}
164 
165 	if (ps == wait_status)
166 		return 0;
167 
168 	if (ps == status) {
169 		dev_err(&sndev->stdev->dev,
170 			"Timed out while performing %s (%d). (%08x)",
171 			op_text[op], op,
172 			ioread32(&ctl->partition_status));
173 
174 		return -ETIMEDOUT;
175 	}
176 
177 	return -EIO;
178 }
179 
180 static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
181 				  u32 val)
182 {
183 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg))
184 		return -EINVAL;
185 
186 	iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg);
187 
188 	return 0;
189 }
190 
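/*
 * Memory window support is not implemented in this version of the driver:
 * the ops below advertise zero windows and ignore translation setup.
 */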
191 static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
192 {
193 	return 0;
194 }
195 
196 static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
197 				      int widx, resource_size_t *addr_align,
198 				      resource_size_t *size_align,
199 				      resource_size_t *size_max)
200 {
201 	return 0;
202 }
203 
204 static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
205 				      dma_addr_t addr, resource_size_t size)
206 {
207 	return 0;
208 }
209 
210 static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
211 {
212 	return 0;
213 }
214 
215 static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
216 					  phys_addr_t *base,
217 					  resource_size_t *size)
218 {
219 	return 0;
220 }
221 
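/*
 * Recover the negotiated link speed and width for a partition from the
 * PCIe Link Status of its vEP PFF: the status word sits in the upper half
 * of the dword read here, with the speed in its low four bits and the
 * width in the six bits above that.
 */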
222 static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
223 					  int partition,
224 					  enum ntb_speed *speed,
225 					  enum ntb_width *width)
226 {
227 	struct switchtec_dev *stdev = sndev->stdev;
228 
229 	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
230 	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
231 
232 	if (speed)
233 		*speed = (linksta >> 16) & 0xF;
234 
235 	if (width)
236 		*width = (linksta >> 20) & 0x3F;
237 }
238 
239 static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
240 {
241 	enum ntb_speed self_speed, peer_speed;
242 	enum ntb_width self_width, peer_width;
243 
244 	if (!sndev->link_is_up) {
245 		sndev->link_speed = NTB_SPEED_NONE;
246 		sndev->link_width = NTB_WIDTH_NONE;
247 		return;
248 	}
249 
250 	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
251 				      &self_speed, &self_width);
252 	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
253 				      &peer_speed, &peer_width);
254 
255 	sndev->link_speed = min(self_speed, peer_speed);
256 	sndev->link_width = min(self_width, peer_width);
257 }
258 
259 enum {
260 	LINK_MESSAGE = 0,
261 	MSG_LINK_UP = 1,
262 	MSG_LINK_DOWN = 2,
263 	MSG_CHECK_LINK = 3,
264 };
265 
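/*
 * Link state handshake: each side publishes its local link_sta in its
 * shared window and nudges the peer with a message on LINK_MESSAGE so it
 * re-evaluates the link. The receiver acts on the message index only and
 * re-reads the shared state, so the payload value is purely informational.
 * check_link fetches the peer's magic and link_sta in a single 64-bit read.
 */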
266 static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
267 {
268 	int link_sta;
269 	int old = sndev->link_is_up;
270 
271 	link_sta = sndev->self_shared->link_sta;
272 	if (link_sta) {
273 		u64 peer = ioread64(&sndev->peer_shared->magic);
274 
275 		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
276 			link_sta = peer >> 32;
277 		else
278 			link_sta = 0;
279 	}
280 
281 	sndev->link_is_up = link_sta;
282 	switchtec_ntb_set_link_speed(sndev);
283 
284 	if (link_sta != old) {
285 		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
286 		ntb_link_event(&sndev->ntb);
287 		dev_info(&sndev->stdev->dev, "ntb link %s",
288 			 link_sta ? "up" : "down");
289 	}
290 }
291 
292 static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
293 {
294 	struct switchtec_ntb *sndev = stdev->sndev;
295 
296 	switchtec_ntb_check_link(sndev);
297 }
298 
299 static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
300 				    enum ntb_speed *speed,
301 				    enum ntb_width *width)
302 {
303 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
304 
305 	if (speed)
306 		*speed = sndev->link_speed;
307 	if (width)
308 		*width = sndev->link_width;
309 
310 	return sndev->link_is_up;
311 }
312 
313 static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
314 				     enum ntb_speed max_speed,
315 				     enum ntb_width max_width)
316 {
317 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
318 
319 	dev_dbg(&sndev->stdev->dev, "enabling link");
320 
321 	sndev->self_shared->link_sta = 1;
322 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
323 
324 	switchtec_ntb_check_link(sndev);
325 
326 	return 0;
327 }
328 
329 static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
330 {
331 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
332 
333 	dev_dbg(&sndev->stdev->dev, "disabling link");
334 
335 	sndev->self_shared->link_sta = 0;
336 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
337 
338 	switchtec_ntb_check_link(sndev);
339 
340 	return 0;
341 }
342 
343 static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
344 {
345 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
346 
347 	return sndev->db_valid_mask;
348 }
349 
350 static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
351 {
352 	return 1;
353 }
354 
355 static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
356 {
357 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
358 
359 	if (db_vector < 0 || db_vector > 1)
360 		return 0;
361 
362 	return sndev->db_valid_mask;
363 }
364 
365 static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
366 {
367 	u64 ret;
368 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
369 
370 	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
371 
372 	return ret & sndev->db_valid_mask;
373 }
374 
375 static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
376 {
377 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
378 
379 	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
380 
381 	return 0;
382 }
383 
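/*
 * db_set_mask/db_clear_mask keep the logical doorbell mask in db_mask
 * under db_mask_lock and mirror its complement into the idb_mask register
 * on every update.
 */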
384 static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
385 {
386 	unsigned long irqflags;
387 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
388 
389 	if (db_bits & ~sndev->db_valid_mask)
390 		return -EINVAL;
391 
392 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
393 
394 	sndev->db_mask |= db_bits << sndev->db_shift;
395 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
396 
397 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
398 
399 	return 0;
400 }
401 
402 static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
403 {
404 	unsigned long irqflags;
405 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
406 
407 	if (db_bits & ~sndev->db_valid_mask)
408 		return -EINVAL;
409 
410 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
411 
412 	sndev->db_mask &= ~(db_bits << sndev->db_shift);
413 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
414 
415 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
416 
417 	return 0;
418 }
419 
420 static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
421 {
422 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
423 
424 	return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
425 }
426 
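/*
 * Report the physical address a client may write in order to ring the
 * peer's doorbells: the offset of the outgoing doorbell register within
 * BAR 0 of the management endpoint, adjusted for the doorbell shift.
 */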
427 static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
428 				      phys_addr_t *db_addr,
429 				      resource_size_t *db_size)
430 {
431 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
432 	unsigned long offset;
433 
434 	offset = (unsigned long)&sndev->mmio_self_dbmsg->odb -
435 		(unsigned long)sndev->stdev->mmio;
436 
437 	offset += sndev->db_shift / 8;
438 
439 	if (db_addr)
440 		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
441 	if (db_size)
442 		*db_size = sizeof(u32);
443 
444 	return 0;
445 }
446 
447 static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
448 {
449 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
450 
451 	iowrite64(db_bits << sndev->db_peer_shift,
452 		  &sndev->mmio_self_dbmsg->odb);
453 
454 	return 0;
455 }
456 
457 static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
458 {
459 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
460 
461 	return ARRAY_SIZE(sndev->self_shared->spad);
462 }
463 
464 static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
465 {
466 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
467 
468 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
469 		return 0;
470 
471 	if (!sndev->self_shared)
472 		return 0;
473 
474 	return sndev->self_shared->spad[idx];
475 }
476 
477 static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
478 {
479 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
480 
481 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
482 		return -EINVAL;
483 
484 	if (!sndev->self_shared)
485 		return -EIO;
486 
487 	sndev->self_shared->spad[idx] = val;
488 
489 	return 0;
490 }
491 
492 static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
493 					int sidx)
494 {
495 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
496 
497 	if (pidx != NTB_DEF_PEER_IDX)
498 		return -EINVAL;
499 
500 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
501 		return 0;
502 
503 	if (!sndev->peer_shared)
504 		return 0;
505 
506 	return ioread32(&sndev->peer_shared->spad[sidx]);
507 }
508 
509 static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
510 					 int sidx, u32 val)
511 {
512 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
513 
514 	if (pidx != NTB_DEF_PEER_IDX)
515 		return -EINVAL;
516 
517 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
518 		return -EINVAL;
519 
520 	if (!sndev->peer_shared)
521 		return -EIO;
522 
523 	iowrite32(val, &sndev->peer_shared->spad[sidx]);
524 
525 	return 0;
526 }
527 
528 static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
529 					int sidx, phys_addr_t *spad_addr)
530 {
531 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
532 	unsigned long offset;
533 
534 	if (pidx != NTB_DEF_PEER_IDX)
535 		return -EINVAL;
536 
537 	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
538 		(unsigned long)sndev->stdev->mmio;
539 
540 	if (spad_addr)
541 		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
542 
543 	return 0;
544 }
545 
546 static const struct ntb_dev_ops switchtec_ntb_ops = {
547 	.mw_count		= switchtec_ntb_mw_count,
548 	.mw_get_align		= switchtec_ntb_mw_get_align,
549 	.mw_set_trans		= switchtec_ntb_mw_set_trans,
550 	.peer_mw_count		= switchtec_ntb_peer_mw_count,
551 	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
552 	.link_is_up		= switchtec_ntb_link_is_up,
553 	.link_enable		= switchtec_ntb_link_enable,
554 	.link_disable		= switchtec_ntb_link_disable,
555 	.db_valid_mask		= switchtec_ntb_db_valid_mask,
556 	.db_vector_count	= switchtec_ntb_db_vector_count,
557 	.db_vector_mask		= switchtec_ntb_db_vector_mask,
558 	.db_read		= switchtec_ntb_db_read,
559 	.db_clear		= switchtec_ntb_db_clear,
560 	.db_set_mask		= switchtec_ntb_db_set_mask,
561 	.db_clear_mask		= switchtec_ntb_db_clear_mask,
562 	.db_read_mask		= switchtec_ntb_db_read_mask,
563 	.peer_db_addr		= switchtec_ntb_peer_db_addr,
564 	.peer_db_set		= switchtec_ntb_peer_db_set,
565 	.spad_count		= switchtec_ntb_spad_count,
566 	.spad_read		= switchtec_ntb_spad_read,
567 	.spad_write		= switchtec_ntb_spad_write,
568 	.peer_spad_read		= switchtec_ntb_peer_spad_read,
569 	.peer_spad_write	= switchtec_ntb_peer_spad_write,
570 	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
571 };
572 
573 static void switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
574 {
575 	u64 part_map;
576 
577 	sndev->ntb.pdev = sndev->stdev->pdev;
578 	sndev->ntb.topo = NTB_TOPO_SWITCH;
579 	sndev->ntb.ops = &switchtec_ntb_ops;
580 
581 	sndev->self_partition = sndev->stdev->partition;
582 
583 	sndev->mmio_ntb = sndev->stdev->mmio_ntb;
584 	part_map = ioread64(&sndev->mmio_ntb->ep_map);
585 	part_map &= ~(1ULL << sndev->self_partition);
586 	sndev->peer_partition = ffs(part_map) - 1;
587 
588 	dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d (%llx)",
589 		sndev->self_partition, sndev->stdev->partition_count,
590 		part_map);
591 
592 	sndev->mmio_ctrl = (void __iomem *)sndev->mmio_ntb +
593 		SWITCHTEC_NTB_REG_CTRL_OFFSET;
594 	sndev->mmio_dbmsg = (void __iomem *)sndev->mmio_ntb +
595 		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
596 
597 	sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
598 	sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
599 	sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
600 }
601 
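/*
 * Build a map from memory window index to BAR number by scanning the BAR
 * setup registers for entries flagged as valid; returns how many direct
 * windows were found.
 */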
602 static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
603 {
604 	int i;
605 	int cnt = 0;
606 
607 	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
608 		u32 r = ioread32(&ctrl->bar_entry[i].ctl);
609 
610 		if (r & NTB_CTRL_BAR_VALID)
611 			map[cnt++] = i;
612 	}
613 
614 	return cnt;
615 }
616 
617 static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
618 {
619 	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
620 				       sndev->mmio_self_ctrl);
621 
622 	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
623 	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
624 
625 	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut",
626 		sndev->nr_direct_mw, sndev->nr_lut_mw);
627 
628 	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
629 					    sndev->mmio_peer_ctrl);
630 
631 	sndev->peer_nr_lut_mw =
632 		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
633 	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
634 
635 	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut",
636 		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
637 
638 }
639 
640 /*
641  * There are 64 doorbells in the switch hardware but they are
642  * shared among all partitions. So we must split them in half
643  * (32 for each partition). However, the message interrupts are
644  * also shared with the top 4 doorbells, so we just limit this to
645  * 28 doorbells per partition.
646  */
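/*
 * For example, when the local partition ID is lower than the peer's, the
 * local side uses db_shift = 0 and db_peer_shift = 32, so its 28 usable
 * doorbells occupy bits 0-27 of the incoming register and the peer's
 * occupy bits 32-59 of the outgoing register.
 */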
647 static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
648 {
649 	sndev->db_valid_mask = 0x0FFFFFFF;
650 
651 	if (sndev->self_partition < sndev->peer_partition) {
652 		sndev->db_shift = 0;
653 		sndev->db_peer_shift = 32;
654 	} else {
655 		sndev->db_shift = 32;
656 		sndev->db_peer_shift = 0;
657 	}
658 
659 	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
660 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
661 	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
662 		  &sndev->mmio_self_dbmsg->odb_mask);
663 }
664 
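/*
 * Route incoming messages: msg_map packs one byte per message register,
 * each holding the register index in its low two bits and the peer
 * partition number above them; the incoming message registers are then
 * initialized with their status and mask bits set.
 */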
665 static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
666 {
667 	int i;
668 	u32 msg_map = 0;
669 
670 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
671 		int m = i | sndev->peer_partition << 2;
672 
673 		msg_map |= m << i * 8;
674 	}
675 
676 	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
677 
678 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
679 		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
680 			  &sndev->mmio_self_dbmsg->imsg[i]);
681 }
682 
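/*
 * Program the requester ID table so that upstream traffic from the root
 * complex (0:00.0) and from the host bridge requester ID reported by the
 * switch is allowed through the NTB. The partition controller is locked
 * for the update and then re-configured.
 */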
683 static int switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
684 {
685 	int rc = 0;
686 	u16 req_id;
687 	u32 error;
688 
689 	req_id = ioread16(&sndev->mmio_ntb->requester_id);
690 
691 	if (ioread32(&sndev->mmio_self_ctrl->req_id_table_size) < 2) {
692 		dev_err(&sndev->stdev->dev,
693 			"Not enough requester IDs available.");
694 		return -EFAULT;
695 	}
696 
697 	rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
698 				   NTB_CTRL_PART_OP_LOCK,
699 				   NTB_CTRL_PART_STATUS_LOCKED);
700 	if (rc)
701 		return rc;
702 
703 	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
704 		  &sndev->mmio_self_ctrl->partition_ctrl);
705 
706 	/*
707 	 * Root Complex Requester ID (which is 0:00.0)
708 	 */
709 	iowrite32(0 << 16 | NTB_CTRL_REQ_ID_EN,
710 		  &sndev->mmio_self_ctrl->req_id_table[0]);
711 
712 	/*
713 	 * Host Bridge Requester ID (as read from the mmap address)
714 	 */
715 	iowrite32(req_id << 16 | NTB_CTRL_REQ_ID_EN,
716 		  &sndev->mmio_self_ctrl->req_id_table[1]);
717 
718 	rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
719 				   NTB_CTRL_PART_OP_CFG,
720 				   NTB_CTRL_PART_STATUS_NORMAL);
721 	if (rc == -EIO) {
722 		error = ioread32(&sndev->mmio_self_ctrl->req_id_error);
723 		dev_err(&sndev->stdev->dev,
724 			"Error setting up the requester ID table: %08x",
725 			error);
726 	}
727 
728 	return rc;
729 }
730 
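/*
 * Fill in the locally owned shared window: magic, partition ID and the
 * size advertised for each window. The first direct window's advertised
 * size is capped at the space covered by the LUT windows.
 */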
731 static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
732 {
733 	int i;
734 
735 	memset(sndev->self_shared, 0, LUT_SIZE);
736 	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
737 	sndev->self_shared->partition_id = sndev->stdev->partition;
738 
739 	for (i = 0; i < sndev->nr_direct_mw; i++) {
740 		int bar = sndev->direct_mw_to_bar[i];
741 		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
742 
743 		if (i == 0)
744 			sz = min_t(resource_size_t, sz,
745 				   LUT_SIZE * sndev->nr_lut_mw);
746 
747 		sndev->self_shared->mw_sizes[i] = sz;
748 	}
749 
750 	for (i = 0; i < sndev->nr_lut_mw; i++) {
751 		int idx = sndev->nr_direct_mw + i;
752 
753 		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
754 	}
755 }
756 
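/*
 * Allocate one LUT_SIZE buffer of coherent memory for the local shared
 * window and program LUT entry 0 in the peer partition's controller to
 * translate to it. The peer performs the mirror-image setup, so mapping
 * the first LUT window of our own BAR gives access to the peer's copy.
 */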
757 static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
758 {
759 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
760 	int bar = sndev->direct_mw_to_bar[0];
761 	u32 ctl_val;
762 	int rc;
763 
764 	sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
765 						 LUT_SIZE,
766 						 &sndev->self_shared_dma,
767 						 GFP_KERNEL);
768 	if (!sndev->self_shared) {
769 		dev_err(&sndev->stdev->dev,
770 			"unable to allocate memory for shared mw");
771 		return -ENOMEM;
772 	}
773 
774 	switchtec_ntb_init_shared(sndev);
775 
776 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
777 				   NTB_CTRL_PART_STATUS_LOCKED);
778 	if (rc)
779 		goto unalloc_and_exit;
780 
781 	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
782 	ctl_val &= 0xFF;
783 	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
784 	ctl_val |= ilog2(LUT_SIZE) << 8;
785 	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
786 	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
787 
788 	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) |
789 		   sndev->self_shared_dma),
790 		  &ctl->lut_entry[0]);
791 
792 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
793 				   NTB_CTRL_PART_STATUS_NORMAL);
794 	if (rc) {
795 		u32 bar_error, lut_error;
796 
797 		bar_error = ioread32(&ctl->bar_error);
798 		lut_error = ioread32(&ctl->lut_error);
799 		dev_err(&sndev->stdev->dev,
800 			"Error setting up shared MW: %08x / %08x",
801 			bar_error, lut_error);
802 		goto unalloc_and_exit;
803 	}
804 
805 	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, bar, LUT_SIZE);
806 	if (!sndev->peer_shared) {
807 		rc = -ENOMEM;
808 		goto unalloc_and_exit;
809 	}
810 
811 	dev_dbg(&sndev->stdev->dev, "Shared MW Ready");
812 	return 0;
813 
814 unalloc_and_exit:
815 	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
816 			  sndev->self_shared, sndev->self_shared_dma);
817 
818 	return rc;
819 }
820 
821 static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
822 {
823 	if (sndev->peer_shared)
824 		pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
825 
826 	if (sndev->self_shared)
827 		dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
828 				  sndev->self_shared,
829 				  sndev->self_shared_dma);
830 }
831 
832 static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
833 {
834 	struct switchtec_ntb *sndev = dev;
835 
836 	dev_dbg(&sndev->stdev->dev, "doorbell\n");
837 
838 	ntb_db_event(&sndev->ntb, 0);
839 
840 	return IRQ_HANDLED;
841 }
842 
843 static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
844 {
845 	int i;
846 	struct switchtec_ntb *sndev = dev;
847 
848 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
849 		u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
850 
851 		if (msg & NTB_DBMSG_IMSG_STATUS) {
852 			dev_dbg(&sndev->stdev->dev, "message: %d %08x\n", i,
853 				(u32)msg);
854 			iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
855 
856 			if (i == LINK_MESSAGE)
857 				switchtec_ntb_check_link(sndev);
858 		}
859 	}
860 
861 	return IRQ_HANDLED;
862 }
863 
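/*
 * Choose MSI vectors for doorbell and message interrupts that do not
 * collide with the vector already used for switchtec events, steer the
 * lower 60 doorbell bits to the doorbell vector and the top four
 * (message) bits to the message vector via idb_vec_map, then request
 * both IRQs.
 */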
864 static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
865 {
866 	int i;
867 	int rc;
868 	int doorbell_irq = 0;
869 	int message_irq = 0;
870 	int event_irq;
871 	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);
872 
873 	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);
874 
875 	while (doorbell_irq == event_irq)
876 		doorbell_irq++;
877 	while (message_irq == doorbell_irq ||
878 	       message_irq == event_irq)
879 		message_irq++;
880 
881 	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d",
882 		event_irq, doorbell_irq, message_irq);
883 
884 	for (i = 0; i < idb_vecs - 4; i++)
885 		iowrite8(doorbell_irq,
886 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
887 
888 	for (; i < idb_vecs; i++)
889 		iowrite8(message_irq,
890 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
891 
892 	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
893 	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);
894 
895 	rc = request_irq(sndev->doorbell_irq,
896 			 switchtec_ntb_doorbell_isr, 0,
897 			 "switchtec_ntb_doorbell", sndev);
898 	if (rc)
899 		return rc;
900 
901 	rc = request_irq(sndev->message_irq,
902 			 switchtec_ntb_message_isr, 0,
903 			 "switchtec_ntb_message", sndev);
904 	if (rc) {
905 		free_irq(sndev->doorbell_irq, sndev);
906 		return rc;
907 	}
908 
909 	return 0;
910 }
911 
912 static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
913 {
914 	free_irq(sndev->doorbell_irq, sndev);
915 	free_irq(sndev->message_irq, sndev);
916 }
917 
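/*
 * Class interface hook called for every registered switchtec device:
 * only endpoints whose PCI class code marks them as NTB get a struct
 * ntb_dev initialized and registered on top of them.
 */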
918 static int switchtec_ntb_add(struct device *dev,
919 			     struct class_interface *class_intf)
920 {
921 	struct switchtec_dev *stdev = to_stdev(dev);
922 	struct switchtec_ntb *sndev;
923 	int rc;
924 
925 	stdev->sndev = NULL;
926 
927 	if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
928 		return -ENODEV;
929 
930 	if (stdev->partition_count != 2)
931 		dev_warn(dev, "ntb driver only supports 2 partitions");
932 
933 	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
934 	if (!sndev)
935 		return -ENOMEM;
936 
937 	sndev->stdev = stdev;
938 	switchtec_ntb_init_sndev(sndev);
939 	switchtec_ntb_init_mw(sndev);
940 	switchtec_ntb_init_db(sndev);
941 	switchtec_ntb_init_msgs(sndev);
942 
943 	rc = switchtec_ntb_init_req_id_table(sndev);
944 	if (rc)
945 		goto free_and_exit;
946 
947 	rc = switchtec_ntb_init_shared_mw(sndev);
948 	if (rc)
949 		goto free_and_exit;
950 
951 	rc = switchtec_ntb_init_db_msg_irq(sndev);
952 	if (rc)
953 		goto deinit_shared_and_exit;
954 
955 	rc = ntb_register_device(&sndev->ntb);
956 	if (rc)
957 		goto deinit_and_exit;
958 
959 	stdev->sndev = sndev;
960 	stdev->link_notifier = switchtec_ntb_link_notification;
961 	dev_info(dev, "NTB device registered");
962 
963 	return 0;
964 
965 deinit_and_exit:
966 	switchtec_ntb_deinit_db_msg_irq(sndev);
967 deinit_shared_and_exit:
968 	switchtec_ntb_deinit_shared_mw(sndev);
969 free_and_exit:
970 	kfree(sndev);
971 	dev_err(dev, "failed to register ntb device: %d", rc);
972 	return rc;
973 }
974 
975 static void switchtec_ntb_remove(struct device *dev,
976 			  struct class_interface *class_intf)
977 {
978 	struct switchtec_dev *stdev = to_stdev(dev);
979 	struct switchtec_ntb *sndev = stdev->sndev;
980 
981 	if (!sndev)
982 		return;
983 
984 	stdev->link_notifier = NULL;
985 	stdev->sndev = NULL;
986 	ntb_unregister_device(&sndev->ntb);
987 	switchtec_ntb_deinit_db_msg_irq(sndev);
988 	switchtec_ntb_deinit_shared_mw(sndev);
989 	kfree(sndev);
990 	dev_info(dev, "ntb device unregistered");
991 }
992 
993 static struct class_interface switchtec_interface = {
994 	.add_dev = switchtec_ntb_add,
995 	.remove_dev = switchtec_ntb_remove,
996 };
997 
998 static int __init switchtec_ntb_init(void)
999 {
1000 	switchtec_interface.class = switchtec_class;
1001 	return class_interface_register(&switchtec_interface);
1002 }
1003 module_init(switchtec_ntb_init);
1004 
1005 static void __exit switchtec_ntb_exit(void)
1006 {
1007 	class_interface_unregister(&switchtec_interface);
1008 }
1009 module_exit(switchtec_ntb_exit);
1010