xref: /linux/drivers/ntb/ntb_transport.c (revision dcf50ca7823506fb3f20b8ffd3f928003cddaeed)
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  *   redistributing this file, you may do so under either license.
4  *
5  *   GPL LICENSE SUMMARY
6  *
7  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
8  *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
9  *
10  *   This program is free software; you can redistribute it and/or modify
11  *   it under the terms of version 2 of the GNU General Public License as
12  *   published by the Free Software Foundation.
13  *
14  *   BSD LICENSE
15  *
16  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
17  *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
18  *
19  *   Redistribution and use in source and binary forms, with or without
20  *   modification, are permitted provided that the following conditions
21  *   are met:
22  *
23  *     * Redistributions of source code must retain the above copyright
24  *       notice, this list of conditions and the following disclaimer.
25  *     * Redistributions in binary form must reproduce the above copyright
26  *       notice, this list of conditions and the following disclaimer in
27  *       the documentation and/or other materials provided with the
28  *       distribution.
29  *     * Neither the name of Intel Corporation nor the names of its
30  *       contributors may be used to endorse or promote products derived
31  *       from this software without specific prior written permission.
32  *
33  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
34  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
35  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
36  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
37  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
38  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
39  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
40  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
41  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
42  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
43  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
44  *
45  * PCIe NTB Transport Linux driver
46  *
47  * Contact Information:
48  * Jon Mason <jon.mason@intel.com>
49  */
50 #include <linux/debugfs.h>
51 #include <linux/delay.h>
52 #include <linux/dmaengine.h>
53 #include <linux/dma-mapping.h>
54 #include <linux/errno.h>
55 #include <linux/export.h>
56 #include <linux/interrupt.h>
57 #include <linux/module.h>
58 #include <linux/pci.h>
59 #include <linux/slab.h>
60 #include <linux/types.h>
61 #include <linux/uaccess.h>
62 #include <linux/mutex.h>
63 #include "linux/ntb.h"
64 #include "linux/ntb_transport.h"
65 
66 #define NTB_TRANSPORT_VERSION	4
67 #define NTB_TRANSPORT_VER	"4"
68 #define NTB_TRANSPORT_NAME	"ntb_transport"
69 #define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"
70 #define NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2)
71 
72 MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
73 MODULE_VERSION(NTB_TRANSPORT_VER);
74 MODULE_LICENSE("Dual BSD/GPL");
75 MODULE_AUTHOR("Intel Corporation");
76 
77 static unsigned long max_mw_size;
78 module_param(max_mw_size, ulong, 0644);
79 MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
80 
81 static unsigned int transport_mtu = 0x10000;
82 module_param(transport_mtu, uint, 0644);
83 MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
84 
85 static unsigned char max_num_clients;
86 module_param(max_num_clients, byte, 0644);
87 MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
88 
89 static unsigned int copy_bytes = 1024;
90 module_param(copy_bytes, uint, 0644);
91 MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
92 
93 static bool use_dma;
94 module_param(use_dma, bool, 0644);
95 MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
96 
97 static bool use_msi;
98 #ifdef CONFIG_NTB_MSI
99 module_param(use_msi, bool, 0644);
100 MODULE_PARM_DESC(use_msi, "Use MSI interrupts instead of doorbells");
101 #endif
102 
103 static struct dentry *nt_debugfs_dir;
104 
105 /* Only two-port NTB devices are supported */
106 #define PIDX		NTB_DEF_PEER_IDX
107 
108 struct ntb_queue_entry {
109 	/* ntb_queue list reference */
110 	struct list_head entry;
111 	/* pointers to data to be transferred */
112 	void *cb_data;
113 	void *buf;
114 	unsigned int len;
115 	unsigned int flags;
116 	int retries;
117 	int errors;
118 	unsigned int tx_index;
119 	unsigned int rx_index;
120 
121 	struct ntb_transport_qp *qp;
122 	union {
123 		struct ntb_payload_header __iomem *tx_hdr;
124 		struct ntb_payload_header *rx_hdr;
125 	};
126 };
127 
128 struct ntb_rx_info {
129 	unsigned int entry;
130 };
131 
132 struct ntb_transport_qp {
133 	struct ntb_transport_ctx *transport;
134 	struct ntb_dev *ndev;
135 	void *cb_data;
136 	struct dma_chan *tx_dma_chan;
137 	struct dma_chan *rx_dma_chan;
138 
139 	bool client_ready;
140 	bool link_is_up;
141 	bool active;
142 
143 	u8 qp_num;	/* Only 64 QPs are allowed.  0-63 */
144 	u64 qp_bit;
145 
146 	struct ntb_rx_info __iomem *rx_info;
147 	struct ntb_rx_info *remote_rx_info;
148 
149 	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
150 			   void *data, int len);
151 	struct list_head tx_free_q;
152 	spinlock_t ntb_tx_free_q_lock;
153 	void __iomem *tx_mw;
154 	phys_addr_t tx_mw_phys;
155 	size_t tx_mw_size;
156 	dma_addr_t tx_mw_dma_addr;
157 	unsigned int tx_index;
158 	unsigned int tx_max_entry;
159 	unsigned int tx_max_frame;
160 
161 	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
162 			   void *data, int len);
163 	struct list_head rx_post_q;
164 	struct list_head rx_pend_q;
165 	struct list_head rx_free_q;
166 	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
167 	spinlock_t ntb_rx_q_lock;
168 	void *rx_buff;
169 	unsigned int rx_index;
170 	unsigned int rx_max_entry;
171 	unsigned int rx_max_frame;
172 	unsigned int rx_alloc_entry;
173 	dma_cookie_t last_cookie;
174 	struct tasklet_struct rxc_db_work;
175 
176 	void (*event_handler)(void *data, int status);
177 	struct delayed_work link_work;
178 	struct work_struct link_cleanup;
179 
180 	struct dentry *debugfs_dir;
181 	struct dentry *debugfs_stats;
182 
183 	/* Stats */
184 	u64 rx_bytes;
185 	u64 rx_pkts;
186 	u64 rx_ring_empty;
187 	u64 rx_err_no_buf;
188 	u64 rx_err_oflow;
189 	u64 rx_err_ver;
190 	u64 rx_memcpy;
191 	u64 rx_async;
192 	u64 tx_bytes;
193 	u64 tx_pkts;
194 	u64 tx_ring_full;
195 	u64 tx_err_no_buf;
196 	u64 tx_memcpy;
197 	u64 tx_async;
198 
199 	bool use_msi;
200 	int msi_irq;
201 	struct ntb_msi_desc msi_desc;
202 	struct ntb_msi_desc peer_msi_desc;
203 };
204 
205 struct ntb_transport_mw {
206 	phys_addr_t phys_addr;
207 	resource_size_t phys_size;
208 	void __iomem *vbase;
209 	size_t xlat_size;
210 	size_t buff_size;
211 	size_t alloc_size;
212 	void *alloc_addr;
213 	void *virt_addr;
214 	dma_addr_t dma_addr;
215 };
216 
217 struct ntb_transport_client_dev {
218 	struct list_head entry;
219 	struct ntb_transport_ctx *nt;
220 	struct device dev;
221 };
222 
223 struct ntb_transport_ctx {
224 	struct list_head entry;
225 	struct list_head client_devs;
226 
227 	struct ntb_dev *ndev;
228 
229 	struct ntb_transport_mw *mw_vec;
230 	struct ntb_transport_qp *qp_vec;
231 	unsigned int mw_count;
232 	unsigned int qp_count;
233 	u64 qp_bitmap;
234 	u64 qp_bitmap_free;
235 
236 	bool use_msi;
237 	unsigned int msi_spad_offset;
238 	u64 msi_db_mask;
239 
240 	bool link_is_up;
241 	struct delayed_work link_work;
242 	struct work_struct link_cleanup;
243 
244 	struct dentry *debugfs_node_dir;
245 
246 	/* Make sure link event work is executed serially */
247 	struct mutex link_event_lock;
248 };
249 
250 enum {
251 	DESC_DONE_FLAG = BIT(0),
252 	LINK_DOWN_FLAG = BIT(1),
253 };
254 
255 struct ntb_payload_header {
256 	unsigned int ver;
257 	unsigned int len;
258 	unsigned int flags;
259 };
260 
261 enum {
262 	VERSION = 0,
263 	QP_LINKS,
264 	NUM_QPS,
265 	NUM_MWS,
266 	MW0_SZ_HIGH,
267 	MW0_SZ_LOW,
268 };
269 
270 #define dev_client_dev(__dev) \
271 	container_of((__dev), struct ntb_transport_client_dev, dev)
272 
273 #define drv_client(__drv) \
274 	container_of((__drv), struct ntb_transport_client, driver)
275 
276 #define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
277 #define NTB_QP_DEF_NUM_ENTRIES	100
278 #define NTB_LINK_DOWN_TIMEOUT	10
279 
280 static void ntb_transport_rxc_db(unsigned long data);
281 static const struct ntb_ctx_ops ntb_transport_ops;
282 static struct ntb_client ntb_transport_client;
283 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
284 			       struct ntb_queue_entry *entry);
285 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
286 static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
287 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
288 
289 
290 static int ntb_transport_bus_match(struct device *dev,
291 				   const struct device_driver *drv)
292 {
293 	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
294 }
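/*
 * Note on the name match above: ntb_transport_register_client_dev() later
 * in this file names each client device "%s%d" (for example a hypothetical
 * "my_client0"), so a driver registered with .driver.name = "my_client"
 * matches every per-transport instance by prefix.
 */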
295 
296 static int ntb_transport_bus_probe(struct device *dev)
297 {
298 	const struct ntb_transport_client *client;
299 	int rc;
300 
301 	get_device(dev);
302 
303 	client = drv_client(dev->driver);
304 	rc = client->probe(dev);
305 	if (rc)
306 		put_device(dev);
307 
308 	return rc;
309 }
310 
311 static void ntb_transport_bus_remove(struct device *dev)
312 {
313 	const struct ntb_transport_client *client;
314 
315 	client = drv_client(dev->driver);
316 	client->remove(dev);
317 
318 	put_device(dev);
319 }
320 
321 static const struct bus_type ntb_transport_bus = {
322 	.name = "ntb_transport",
323 	.match = ntb_transport_bus_match,
324 	.probe = ntb_transport_bus_probe,
325 	.remove = ntb_transport_bus_remove,
326 };
327 
328 static LIST_HEAD(ntb_transport_list);
329 
330 static int ntb_bus_init(struct ntb_transport_ctx *nt)
331 {
332 	list_add_tail(&nt->entry, &ntb_transport_list);
333 	return 0;
334 }
335 
336 static void ntb_bus_remove(struct ntb_transport_ctx *nt)
337 {
338 	struct ntb_transport_client_dev *client_dev, *cd;
339 
340 	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
341 		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
342 			dev_name(&client_dev->dev));
343 		list_del(&client_dev->entry);
344 		device_unregister(&client_dev->dev);
345 	}
346 
347 	list_del(&nt->entry);
348 }
349 
350 static void ntb_transport_client_release(struct device *dev)
351 {
352 	struct ntb_transport_client_dev *client_dev;
353 
354 	client_dev = dev_client_dev(dev);
355 	kfree(client_dev);
356 }
357 
358 /**
359  * ntb_transport_unregister_client_dev - Unregister NTB client device
360  * @device_name: Name of NTB client device
361  *
362  * Unregister an NTB client device with the NTB transport layer
363  */
364 void ntb_transport_unregister_client_dev(char *device_name)
365 {
366 	struct ntb_transport_client_dev *client, *cd;
367 	struct ntb_transport_ctx *nt;
368 
369 	list_for_each_entry(nt, &ntb_transport_list, entry)
370 		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
371 			if (!strncmp(dev_name(&client->dev), device_name,
372 				     strlen(device_name))) {
373 				list_del(&client->entry);
374 				device_unregister(&client->dev);
375 			}
376 }
377 EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
378 
379 /**
380  * ntb_transport_register_client_dev - Register NTB client device
381  * @device_name: Name of NTB client device
382  *
383  * Register an NTB client device with the NTB transport layer
384  *
385  * Returns: %0 on success or -errno code on error
386  */
387 int ntb_transport_register_client_dev(char *device_name)
388 {
389 	struct ntb_transport_client_dev *client_dev;
390 	struct ntb_transport_ctx *nt;
391 	int node;
392 	int rc, i = 0;
393 
394 	if (list_empty(&ntb_transport_list))
395 		return -ENODEV;
396 
397 	list_for_each_entry(nt, &ntb_transport_list, entry) {
398 		struct device *dev;
399 
400 		node = dev_to_node(&nt->ndev->dev);
401 
402 		client_dev = kzalloc_node(sizeof(*client_dev),
403 					  GFP_KERNEL, node);
404 		if (!client_dev) {
405 			rc = -ENOMEM;
406 			goto err;
407 		}
408 
409 		dev = &client_dev->dev;
410 
411 		/* setup and register client devices */
412 		dev_set_name(dev, "%s%d", device_name, i);
413 		dev->bus = &ntb_transport_bus;
414 		dev->release = ntb_transport_client_release;
415 		dev->parent = &nt->ndev->dev;
416 
417 		rc = device_register(dev);
418 		if (rc) {
419 			put_device(dev);
420 			goto err;
421 		}
422 
423 		list_add_tail(&client_dev->entry, &nt->client_devs);
424 		i++;
425 	}
426 
427 	return 0;
428 
429 err:
430 	ntb_transport_unregister_client_dev(device_name);
431 
432 	return rc;
433 }
434 EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);
435 
436 /**
437  * ntb_transport_register_client - Register NTB client driver
438  * @drv: NTB client driver to be registered
439  *
440  * Register an NTB client driver with the NTB transport layer
441  *
442  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
443  */
444 int ntb_transport_register_client(struct ntb_transport_client *drv)
445 {
446 	drv->driver.bus = &ntb_transport_bus;
447 
448 	if (list_empty(&ntb_transport_list))
449 		return -ENODEV;
450 
451 	return driver_register(&drv->driver);
452 }
453 EXPORT_SYMBOL_GPL(ntb_transport_register_client);
454 
455 /**
456  * ntb_transport_unregister_client - Unregister NTB client driver
457  * @drv: NTB client driver to be unregistered
458  *
459  * Unregister an NTB client driver with the NTB transport layer
460  *
461  * This function does not return a value.
462  */
463 void ntb_transport_unregister_client(struct ntb_transport_client *drv)
464 {
465 	driver_unregister(&drv->driver);
466 }
467 EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
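/*
 * Usage sketch for the four registration helpers above, modeled on in-tree
 * clients such as ntb_netdev.  This is illustrative only; the "my_client"
 * names and callbacks are placeholders, not part of this file:
 *
 *	static int my_client_probe(struct device *client_dev)
 *	{
 *		// typically creates queues with ntb_transport_create_queue()
 *		return 0;
 *	}
 *
 *	static void my_client_remove(struct device *client_dev)
 *	{
 *		// tear down whatever probe set up
 *	}
 *
 *	static struct ntb_transport_client my_client = {
 *		.driver.name = KBUILD_MODNAME,
 *		.driver.owner = THIS_MODULE,
 *		.probe = my_client_probe,
 *		.remove = my_client_remove,
 *	};
 *
 *	static int __init my_client_init(void)
 *	{
 *		int rc;
 *
 *		rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
 *		if (rc)
 *			return rc;
 *
 *		rc = ntb_transport_register_client(&my_client);
 *		if (rc)
 *			ntb_transport_unregister_client_dev(KBUILD_MODNAME);
 *
 *		return rc;
 *	}
 */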
468 
469 static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
470 			    loff_t *offp)
471 {
472 	struct ntb_transport_qp *qp;
473 	char *buf;
474 	ssize_t ret, out_offset, out_count;
475 
476 	qp = filp->private_data;
477 
478 	if (!qp || !qp->link_is_up)
479 		return 0;
480 
481 	out_count = 1000;
482 
483 	buf = kmalloc(out_count, GFP_KERNEL);
484 	if (!buf)
485 		return -ENOMEM;
486 
487 	out_offset = 0;
488 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
489 			       "\nNTB QP stats:\n\n");
490 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
491 			       "rx_bytes - \t%llu\n", qp->rx_bytes);
492 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
493 			       "rx_pkts - \t%llu\n", qp->rx_pkts);
494 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
495 			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
496 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
497 			       "rx_async - \t%llu\n", qp->rx_async);
498 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
499 			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
500 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
501 			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
502 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
503 			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
504 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
505 			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
506 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
507 			       "rx_buff - \t0x%p\n", qp->rx_buff);
508 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
509 			       "rx_index - \t%u\n", qp->rx_index);
510 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
511 			       "rx_max_entry - \t%u\n", qp->rx_max_entry);
512 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
513 			       "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);
514 
515 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
516 			       "tx_bytes - \t%llu\n", qp->tx_bytes);
517 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
518 			       "tx_pkts - \t%llu\n", qp->tx_pkts);
519 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
520 			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
521 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
522 			       "tx_async - \t%llu\n", qp->tx_async);
523 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
524 			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
525 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
526 			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
527 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
528 			       "tx_mw - \t0x%p\n", qp->tx_mw);
529 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
530 			       "tx_index (H) - \t%u\n", qp->tx_index);
531 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
532 			       "RRI (T) - \t%u\n",
533 			       qp->remote_rx_info->entry);
534 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
535 			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
536 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
537 			       "free tx - \t%u\n",
538 			       ntb_transport_tx_free_entry(qp));
539 
540 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
541 			       "\n");
542 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
543 			       "Using TX DMA - \t%s\n",
544 			       qp->tx_dma_chan ? "Yes" : "No");
545 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
546 			       "Using RX DMA - \t%s\n",
547 			       qp->rx_dma_chan ? "Yes" : "No");
548 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
549 			       "QP Link - \t%s\n",
550 			       qp->link_is_up ? "Up" : "Down");
551 	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
552 			       "\n");
553 
554 	if (out_offset > out_count)
555 		out_offset = out_count;
556 
557 	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
558 	kfree(buf);
559 	return ret;
560 }
561 
562 static const struct file_operations ntb_qp_debugfs_stats = {
563 	.owner = THIS_MODULE,
564 	.open = simple_open,
565 	.read = debugfs_read,
566 };
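/*
 * With debugfs enabled, the per-QP statistics served by debugfs_read()
 * appear under the per-device directory created in ntb_transport_probe(),
 * e.g. (assuming the module's debugfs root keeps its usual name):
 *
 *	/sys/kernel/debug/ntb_transport/<pci device>/qp0/stats
 */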
567 
568 static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
569 			 struct list_head *list)
570 {
571 	unsigned long flags;
572 
573 	spin_lock_irqsave(lock, flags);
574 	list_add_tail(entry, list);
575 	spin_unlock_irqrestore(lock, flags);
576 }
577 
578 static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
579 					   struct list_head *list)
580 {
581 	struct ntb_queue_entry *entry;
582 	unsigned long flags;
583 
584 	spin_lock_irqsave(lock, flags);
585 	if (list_empty(list)) {
586 		entry = NULL;
587 		goto out;
588 	}
589 	entry = list_first_entry(list, struct ntb_queue_entry, entry);
590 	list_del(&entry->entry);
591 
592 out:
593 	spin_unlock_irqrestore(lock, flags);
594 
595 	return entry;
596 }
597 
598 static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
599 					   struct list_head *list,
600 					   struct list_head *to_list)
601 {
602 	struct ntb_queue_entry *entry;
603 	unsigned long flags;
604 
605 	spin_lock_irqsave(lock, flags);
606 
607 	if (list_empty(list)) {
608 		entry = NULL;
609 	} else {
610 		entry = list_first_entry(list, struct ntb_queue_entry, entry);
611 		list_move_tail(&entry->entry, to_list);
612 	}
613 
614 	spin_unlock_irqrestore(lock, flags);
615 
616 	return entry;
617 }
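/*
 * These three helpers implement the entry lifecycle used throughout this
 * file: RX entries are placed on rx_free_q by ntb_transport_setup_qp_mw(),
 * move to rx_pend_q when a client posts a receive buffer (in
 * ntb_transport_rx_enqueue(), later in this file), are moved from
 * rx_pend_q to rx_post_q by ntb_process_rxc() via ntb_list_mv() while the
 * payload is copied, and return to rx_free_q in ntb_complete_rxc() once
 * the client's rx_handler has run.  TX entries simply cycle on tx_free_q
 * through ntb_list_add()/ntb_list_rm().
 */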
618 
619 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
620 				     unsigned int qp_num)
621 {
622 	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
623 	struct ntb_transport_mw *mw;
624 	struct ntb_dev *ndev = nt->ndev;
625 	struct ntb_queue_entry *entry;
626 	unsigned int rx_size, num_qps_mw;
627 	unsigned int mw_num, mw_count, qp_count;
628 	unsigned int i;
629 	int node;
630 
631 	mw_count = nt->mw_count;
632 	qp_count = nt->qp_count;
633 
634 	mw_num = QP_TO_MW(nt, qp_num);
635 	mw = &nt->mw_vec[mw_num];
636 
637 	if (!mw->virt_addr)
638 		return -ENOMEM;
639 
640 	if (mw_num < qp_count % mw_count)
641 		num_qps_mw = qp_count / mw_count + 1;
642 	else
643 		num_qps_mw = qp_count / mw_count;
644 
645 	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
646 	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
647 	rx_size -= sizeof(struct ntb_rx_info);
648 
649 	qp->remote_rx_info = qp->rx_buff + rx_size;
650 
651 	/* Due to housekeeping, there must be at least 2 buffs */
652 	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
653 	qp->rx_max_entry = rx_size / qp->rx_max_frame;
654 	qp->rx_index = 0;
655 
656 	/*
657 	 * Check whether we have more entries than the default.  If so,
658 	 * allocate the additional entries here so that we stay in sync
659 	 * with the number of transport frames.
660 	 */
661 	node = dev_to_node(&ndev->dev);
662 	for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
663 		entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
664 		if (!entry)
665 			return -ENOMEM;
666 
667 		entry->qp = qp;
668 		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
669 			     &qp->rx_free_q);
670 		qp->rx_alloc_entry++;
671 	}
672 
673 	qp->remote_rx_info->entry = qp->rx_max_entry - 1;
674 
675 	/* setup the hdr offsets with 0's */
676 	for (i = 0; i < qp->rx_max_entry; i++) {
677 		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
678 				sizeof(struct ntb_payload_header));
679 		memset(offset, 0, sizeof(struct ntb_payload_header));
680 	}
681 
682 	qp->rx_pkts = 0;
683 	qp->tx_pkts = 0;
684 	qp->tx_index = 0;
685 
686 	return 0;
687 }
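/*
 * Worked example of the RX ring layout set up above (illustrative numbers
 * only): assume this QP's share of the memory window is 1 MiB.  After
 * reserving sizeof(struct ntb_rx_info) (4 bytes) at the end for
 * remote_rx_info, rx_size = 0x100000 - 4.  With the default transport_mtu
 * of 0x10000, rx_max_frame = min(0x10000, rx_size / 2) = 0x10000 and
 * rx_max_entry = rx_size / rx_max_frame = 15.  Each 64 KiB frame carries
 * its struct ntb_payload_header in its final 12 bytes, so the usable
 * payload per frame is 0x10000 - 12 bytes.
 */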
688 
689 static irqreturn_t ntb_transport_isr(int irq, void *dev)
690 {
691 	struct ntb_transport_qp *qp = dev;
692 
693 	tasklet_schedule(&qp->rxc_db_work);
694 
695 	return IRQ_HANDLED;
696 }
697 
698 static void ntb_transport_setup_qp_peer_msi(struct ntb_transport_ctx *nt,
699 					    unsigned int qp_num)
700 {
701 	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
702 	int spad = qp_num * 2 + nt->msi_spad_offset;
703 
704 	if (!nt->use_msi)
705 		return;
706 
707 	if (spad >= ntb_spad_count(nt->ndev))
708 		return;
709 
710 	qp->peer_msi_desc.addr_offset =
711 		ntb_peer_spad_read(qp->ndev, PIDX, spad);
712 	qp->peer_msi_desc.data =
713 		ntb_peer_spad_read(qp->ndev, PIDX, spad + 1);
714 
715 	dev_dbg(&qp->ndev->pdev->dev, "QP%d Peer MSI addr=%x data=%x\n",
716 		qp_num, qp->peer_msi_desc.addr_offset, qp->peer_msi_desc.data);
717 
718 	if (qp->peer_msi_desc.addr_offset) {
719 		qp->use_msi = true;
720 		dev_info(&qp->ndev->pdev->dev,
721 			 "Using MSI interrupts for QP%d\n", qp_num);
722 	}
723 }
724 
725 static void ntb_transport_setup_qp_msi(struct ntb_transport_ctx *nt,
726 				       unsigned int qp_num)
727 {
728 	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
729 	int spad = qp_num * 2 + nt->msi_spad_offset;
730 	int rc;
731 
732 	if (!nt->use_msi)
733 		return;
734 
735 	if (spad >= ntb_spad_count(nt->ndev)) {
736 		dev_warn_once(&qp->ndev->pdev->dev,
737 			      "Not enough SPADS to use MSI interrupts\n");
738 		return;
739 	}
740 
741 	ntb_spad_write(qp->ndev, spad, 0);
742 	ntb_spad_write(qp->ndev, spad + 1, 0);
743 
744 	if (!qp->msi_irq) {
745 		qp->msi_irq = ntbm_msi_request_irq(qp->ndev, ntb_transport_isr,
746 						   KBUILD_MODNAME, qp,
747 						   &qp->msi_desc);
748 		if (qp->msi_irq < 0) {
749 			dev_warn(&qp->ndev->pdev->dev,
750 				 "Unable to allocate MSI interrupt for qp%d\n",
751 				 qp_num);
752 			return;
753 		}
754 	}
755 
756 	rc = ntb_spad_write(qp->ndev, spad, qp->msi_desc.addr_offset);
757 	if (rc)
758 		goto err_free_interrupt;
759 
760 	rc = ntb_spad_write(qp->ndev, spad + 1, qp->msi_desc.data);
761 	if (rc)
762 		goto err_free_interrupt;
763 
764 	dev_dbg(&qp->ndev->pdev->dev, "QP%d MSI %d addr=%x data=%x\n",
765 		qp_num, qp->msi_irq, qp->msi_desc.addr_offset,
766 		qp->msi_desc.data);
767 
768 	return;
769 
770 err_free_interrupt:
771 	devm_free_irq(&nt->ndev->dev, qp->msi_irq, qp);
772 }
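/*
 * Scratchpad layout used by the two MSI setup helpers above: each QP owns
 * a pair of scratchpads starting at msi_spad_offset, which
 * ntb_transport_probe() places just after the MW size entries
 * (mw_count * 2 + MW0_SZ_HIGH):
 *
 *	spad(qp_num * 2 + msi_spad_offset)     = msi_desc.addr_offset
 *	spad(qp_num * 2 + msi_spad_offset + 1) = msi_desc.data
 *
 * The peer reads the same pair with ntb_peer_spad_read() in
 * ntb_transport_setup_qp_peer_msi() and switches the QP to MSI once it
 * sees a non-zero addr_offset.
 */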
773 
774 static void ntb_transport_msi_peer_desc_changed(struct ntb_transport_ctx *nt)
775 {
776 	int i;
777 
778 	dev_dbg(&nt->ndev->pdev->dev, "Peer MSI descriptors changed");
779 
780 	for (i = 0; i < nt->qp_count; i++)
781 		ntb_transport_setup_qp_peer_msi(nt, i);
782 }
783 
784 static void ntb_transport_msi_desc_changed(void *data)
785 {
786 	struct ntb_transport_ctx *nt = data;
787 	int i;
788 
789 	dev_dbg(&nt->ndev->pdev->dev, "MSI descriptors changed");
790 
791 	for (i = 0; i < nt->qp_count; i++)
792 		ntb_transport_setup_qp_msi(nt, i);
793 
794 	ntb_peer_db_set(nt->ndev, nt->msi_db_mask);
795 }
796 
797 static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
798 {
799 	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
800 	struct pci_dev *pdev = nt->ndev->pdev;
801 
802 	if (!mw->virt_addr)
803 		return;
804 
805 	ntb_mw_clear_trans(nt->ndev, PIDX, num_mw);
806 	dma_free_coherent(&pdev->dev, mw->alloc_size,
807 			  mw->alloc_addr, mw->dma_addr);
808 	mw->xlat_size = 0;
809 	mw->buff_size = 0;
810 	mw->alloc_size = 0;
811 	mw->alloc_addr = NULL;
812 	mw->virt_addr = NULL;
813 }
814 
815 static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
816 			       struct device *ntb_dev, size_t align)
817 {
818 	dma_addr_t dma_addr;
819 	void *alloc_addr, *virt_addr;
820 	int rc;
821 
822 	/*
823 	 * The buffer here is allocated against the NTB device. The reason to
824 	 * use a dma_alloc_*() call is to get a large, IOVA-contiguous buffer
825 	 * backing the NTB BAR for the remote host to write into. During
826 	 * receive processing, the data is copied out of the receive buffer
827 	 * into the kernel skbuff. When a DMA device is used, dma_map_page()
828 	 * is called on the kvaddr of the receive buffer (from dma_alloc_*())
829 	 * and the buffer is remapped against the DMA device. This looks like
830 	 * a double DMA mapping, but the first mapping is to the NTB device
831 	 * and the second is to the DMA device. DMA_ATTR_FORCE_CONTIGUOUS is
832 	 * necessary for the later dma_map_page() to not fail.
833 	 */
834 	alloc_addr = dma_alloc_attrs(ntb_dev, mw->alloc_size,
835 				     &dma_addr, GFP_KERNEL,
836 				     DMA_ATTR_FORCE_CONTIGUOUS);
837 	if (!alloc_addr) {
838 		dev_err(ntb_dev, "Unable to alloc MW buff of size %zu\n",
839 			mw->alloc_size);
840 		return -ENOMEM;
841 	}
842 	virt_addr = alloc_addr;
843 
844 	/*
845 	 * We must ensure that the allocated memory address is BAR-size
846 	 * aligned in order for the XLAT register to accept the value. This
847 	 * is a hardware requirement. It is recommended to set up CMA for
848 	 * BAR sizes equal to or greater than 4MB.
849 	 */
850 	if (!IS_ALIGNED(dma_addr, align)) {
851 		if (mw->alloc_size > mw->buff_size) {
852 			virt_addr = PTR_ALIGN(alloc_addr, align);
853 			dma_addr = ALIGN(dma_addr, align);
854 		} else {
855 			rc = -ENOMEM;
856 			goto err;
857 		}
858 	}
859 
860 	mw->alloc_addr = alloc_addr;
861 	mw->virt_addr = virt_addr;
862 	mw->dma_addr = dma_addr;
863 
864 	return 0;
865 
866 err:
867 	dma_free_coherent(ntb_dev, mw->alloc_size, alloc_addr, dma_addr);
868 
869 	return rc;
870 }
871 
872 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
873 		      resource_size_t size)
874 {
875 	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
876 	struct pci_dev *pdev = nt->ndev->pdev;
877 	size_t xlat_size, buff_size;
878 	resource_size_t xlat_align;
879 	resource_size_t xlat_align_size;
880 	int rc;
881 
882 	if (!size)
883 		return -EINVAL;
884 
885 	rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align,
886 			      &xlat_align_size, NULL);
887 	if (rc)
888 		return rc;
889 
890 	xlat_size = round_up(size, xlat_align_size);
891 	buff_size = round_up(size, xlat_align);
892 
893 	/* No need to re-setup */
894 	if (mw->xlat_size == xlat_size)
895 		return 0;
896 
897 	if (mw->buff_size)
898 		ntb_free_mw(nt, num_mw);
899 
900 	/* Alloc memory for receiving data.  Must be aligned */
901 	mw->xlat_size = xlat_size;
902 	mw->buff_size = buff_size;
903 	mw->alloc_size = buff_size;
904 
905 	rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align);
906 	if (rc) {
907 		mw->alloc_size *= 2;
908 		rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align);
909 		if (rc) {
910 			dev_err(&pdev->dev,
911 				"Unable to alloc aligned MW buff\n");
912 			mw->xlat_size = 0;
913 			mw->buff_size = 0;
914 			mw->alloc_size = 0;
915 			return rc;
916 		}
917 	}
918 
919 	/* Notify HW the memory location of the receive buffer */
920 	rc = ntb_mw_set_trans(nt->ndev, PIDX, num_mw, mw->dma_addr,
921 			      mw->xlat_size);
922 	if (rc) {
923 		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
924 		ntb_free_mw(nt, num_mw);
925 		return -EIO;
926 	}
927 
928 	return 0;
929 }
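/*
 * Why the retry above doubles alloc_size (worked example with assumed
 * numbers): suppose xlat_align and buff_size are both 4 MiB.  If the first
 * dma_alloc_attrs() happens to return memory whose DMA address is not
 * 4 MiB aligned, ntb_alloc_mw_buffer() has no slack to shift the buffer
 * (alloc_size == buff_size) and fails.  Retrying with an 8 MiB allocation
 * guarantees that some 4 MiB-aligned address inside it still leaves at
 * least 4 MiB of room, so PTR_ALIGN()/ALIGN() can carve out an aligned
 * window that satisfies the XLAT alignment requirement.
 */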
930 
931 static void ntb_qp_link_context_reset(struct ntb_transport_qp *qp)
932 {
933 	qp->link_is_up = false;
934 	qp->active = false;
935 
936 	qp->tx_index = 0;
937 	qp->rx_index = 0;
938 	qp->rx_bytes = 0;
939 	qp->rx_pkts = 0;
940 	qp->rx_ring_empty = 0;
941 	qp->rx_err_no_buf = 0;
942 	qp->rx_err_oflow = 0;
943 	qp->rx_err_ver = 0;
944 	qp->rx_memcpy = 0;
945 	qp->rx_async = 0;
946 	qp->tx_bytes = 0;
947 	qp->tx_pkts = 0;
948 	qp->tx_ring_full = 0;
949 	qp->tx_err_no_buf = 0;
950 	qp->tx_memcpy = 0;
951 	qp->tx_async = 0;
952 }
953 
954 static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
955 {
956 	ntb_qp_link_context_reset(qp);
957 	if (qp->remote_rx_info)
958 		qp->remote_rx_info->entry = qp->rx_max_entry - 1;
959 }
960 
961 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
962 {
963 	struct ntb_transport_ctx *nt = qp->transport;
964 	struct pci_dev *pdev = nt->ndev->pdev;
965 
966 	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);
967 
968 	cancel_delayed_work_sync(&qp->link_work);
969 	ntb_qp_link_down_reset(qp);
970 
971 	if (qp->event_handler)
972 		qp->event_handler(qp->cb_data, qp->link_is_up);
973 }
974 
975 static void ntb_qp_link_cleanup_work(struct work_struct *work)
976 {
977 	struct ntb_transport_qp *qp = container_of(work,
978 						   struct ntb_transport_qp,
979 						   link_cleanup);
980 	struct ntb_transport_ctx *nt = qp->transport;
981 
982 	ntb_qp_link_cleanup(qp);
983 
984 	if (nt->link_is_up)
985 		schedule_delayed_work(&qp->link_work,
986 				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
987 }
988 
989 static void ntb_qp_link_down(struct ntb_transport_qp *qp)
990 {
991 	schedule_work(&qp->link_cleanup);
992 }
993 
994 static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
995 {
996 	struct ntb_transport_qp *qp;
997 	u64 qp_bitmap_alloc;
998 	unsigned int i, count;
999 
1000 	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
1001 
1002 	/* Pass along the info to any clients */
1003 	for (i = 0; i < nt->qp_count; i++)
1004 		if (qp_bitmap_alloc & BIT_ULL(i)) {
1005 			qp = &nt->qp_vec[i];
1006 			ntb_qp_link_cleanup(qp);
1007 			cancel_work_sync(&qp->link_cleanup);
1008 			cancel_delayed_work_sync(&qp->link_work);
1009 		}
1010 
1011 	if (!nt->link_is_up)
1012 		cancel_delayed_work_sync(&nt->link_work);
1013 
1014 	for (i = 0; i < nt->mw_count; i++)
1015 		ntb_free_mw(nt, i);
1016 
1017 	/* The scratchpad registers keep their values if the remote side
1018 	 * goes down; blast them now to give them a sane value the next
1019 	 * time they are accessed.
1020 	 */
1021 	count = ntb_spad_count(nt->ndev);
1022 	for (i = 0; i < count; i++)
1023 		ntb_spad_write(nt->ndev, i, 0);
1024 }
1025 
1026 static void ntb_transport_link_cleanup_work(struct work_struct *work)
1027 {
1028 	struct ntb_transport_ctx *nt =
1029 		container_of(work, struct ntb_transport_ctx, link_cleanup);
1030 
1031 	guard(mutex)(&nt->link_event_lock);
1032 	ntb_transport_link_cleanup(nt);
1033 }
1034 
1035 static void ntb_transport_event_callback(void *data)
1036 {
1037 	struct ntb_transport_ctx *nt = data;
1038 
1039 	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
1040 		schedule_delayed_work(&nt->link_work, 0);
1041 	else
1042 		schedule_work(&nt->link_cleanup);
1043 }
1044 
1045 static void ntb_transport_link_work(struct work_struct *work)
1046 {
1047 	struct ntb_transport_ctx *nt =
1048 		container_of(work, struct ntb_transport_ctx, link_work.work);
1049 	struct ntb_dev *ndev = nt->ndev;
1050 	struct pci_dev *pdev = ndev->pdev;
1051 	resource_size_t size;
1052 	u32 val;
1053 	int rc = 0, i, spad;
1054 
1055 	guard(mutex)(&nt->link_event_lock);
1056 
1057 	/* send the local info, in the opposite order of the way we read it */
1058 
1059 	if (nt->use_msi) {
1060 		rc = ntb_msi_setup_mws(ndev);
1061 		if (rc) {
1062 			dev_warn(&pdev->dev,
1063 				 "Failed to register MSI memory window: %d\n",
1064 				 rc);
1065 			nt->use_msi = false;
1066 		}
1067 	}
1068 
1069 	for (i = 0; i < nt->qp_count; i++)
1070 		ntb_transport_setup_qp_msi(nt, i);
1071 
1072 	for (i = 0; i < nt->mw_count; i++) {
1073 		size = nt->mw_vec[i].phys_size;
1074 
1075 		if (max_mw_size && size > max_mw_size)
1076 			size = max_mw_size;
1077 
1078 		spad = MW0_SZ_HIGH + (i * 2);
1079 		ntb_peer_spad_write(ndev, PIDX, spad, upper_32_bits(size));
1080 
1081 		spad = MW0_SZ_LOW + (i * 2);
1082 		ntb_peer_spad_write(ndev, PIDX, spad, lower_32_bits(size));
1083 	}
1084 
1085 	ntb_peer_spad_write(ndev, PIDX, NUM_MWS, nt->mw_count);
1086 
1087 	ntb_peer_spad_write(ndev, PIDX, NUM_QPS, nt->qp_count);
1088 
1089 	ntb_peer_spad_write(ndev, PIDX, VERSION, NTB_TRANSPORT_VERSION);
1090 
1091 	/* Query the remote side for its info */
1092 	val = ntb_spad_read(ndev, VERSION);
1093 	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
1094 	if (val != NTB_TRANSPORT_VERSION)
1095 		goto out;
1096 
1097 	val = ntb_spad_read(ndev, NUM_QPS);
1098 	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
1099 	if (val != nt->qp_count)
1100 		goto out;
1101 
1102 	val = ntb_spad_read(ndev, NUM_MWS);
1103 	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
1104 	if (val != nt->mw_count)
1105 		goto out;
1106 
1107 	for (i = 0; i < nt->mw_count; i++) {
1108 		u64 val64;
1109 
1110 		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
1111 		val64 = (u64)val << 32;
1112 
1113 		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
1114 		val64 |= val;
1115 
1116 		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);
1117 
1118 		rc = ntb_set_mw(nt, i, val64);
1119 		if (rc)
1120 			goto out1;
1121 	}
1122 
1123 	nt->link_is_up = true;
1124 
1125 	for (i = 0; i < nt->qp_count; i++) {
1126 		struct ntb_transport_qp *qp = &nt->qp_vec[i];
1127 
1128 		ntb_transport_setup_qp_mw(nt, i);
1129 		ntb_transport_setup_qp_peer_msi(nt, i);
1130 
1131 		if (qp->client_ready)
1132 			schedule_delayed_work(&qp->link_work, 0);
1133 	}
1134 
1135 	return;
1136 
1137 out1:
1138 	for (i = 0; i < nt->mw_count; i++)
1139 		ntb_free_mw(nt, i);
1140 
1141 	/* if there's an actual failure, we should just bail */
1142 	if (rc < 0)
1143 		return;
1144 
1145 out:
1146 	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
1147 		schedule_delayed_work(&nt->link_work,
1148 				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
1149 }
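/*
 * Handshake summary for the link work above (restating the code, not a new
 * protocol): each side writes its parameters into the *peer's* scratchpads
 * in the reverse order of how they are consumed,
 *
 *	MW0_SZ_HIGH/MW0_SZ_LOW (per MW) -> NUM_MWS -> NUM_QPS -> VERSION
 *
 * and then reads its *local* scratchpads, which the peer fills in the same
 * way.  Only when VERSION, NUM_QPS and NUM_MWS all match does it program
 * the MW translations via ntb_set_mw() and mark the transport link up;
 * otherwise it reschedules itself for as long as the NTB link stays up.
 */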
1150 
1151 static void ntb_qp_link_work(struct work_struct *work)
1152 {
1153 	struct ntb_transport_qp *qp = container_of(work,
1154 						   struct ntb_transport_qp,
1155 						   link_work.work);
1156 	struct pci_dev *pdev = qp->ndev->pdev;
1157 	struct ntb_transport_ctx *nt = qp->transport;
1158 	int val;
1159 
1160 	WARN_ON(!nt->link_is_up);
1161 
1162 	val = ntb_spad_read(nt->ndev, QP_LINKS);
1163 
1164 	ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num));
1165 
1166 	/* query remote spad for qp ready bits */
1167 	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);
1168 
1169 	/* See if the remote side is up */
1170 	if (val & BIT(qp->qp_num)) {
1171 		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
1172 		qp->link_is_up = true;
1173 		qp->active = true;
1174 
1175 		if (qp->event_handler)
1176 			qp->event_handler(qp->cb_data, qp->link_is_up);
1177 
1178 		if (qp->active)
1179 			tasklet_schedule(&qp->rxc_db_work);
1180 	} else if (nt->link_is_up)
1181 		schedule_delayed_work(&qp->link_work,
1182 				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
1183 }
1184 
1185 static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
1186 				    unsigned int qp_num)
1187 {
1188 	struct ntb_transport_qp *qp;
1189 	phys_addr_t mw_base;
1190 	resource_size_t mw_size;
1191 	unsigned int num_qps_mw, tx_size;
1192 	unsigned int mw_num, mw_count, qp_count;
1193 	u64 qp_offset;
1194 
1195 	mw_count = nt->mw_count;
1196 	qp_count = nt->qp_count;
1197 
1198 	mw_num = QP_TO_MW(nt, qp_num);
1199 
1200 	qp = &nt->qp_vec[qp_num];
1201 	qp->qp_num = qp_num;
1202 	qp->transport = nt;
1203 	qp->ndev = nt->ndev;
1204 	qp->client_ready = false;
1205 	qp->event_handler = NULL;
1206 	ntb_qp_link_context_reset(qp);
1207 
1208 	if (mw_num < qp_count % mw_count)
1209 		num_qps_mw = qp_count / mw_count + 1;
1210 	else
1211 		num_qps_mw = qp_count / mw_count;
1212 
1213 	mw_base = nt->mw_vec[mw_num].phys_addr;
1214 	mw_size = nt->mw_vec[mw_num].phys_size;
1215 
1216 	if (max_mw_size && mw_size > max_mw_size)
1217 		mw_size = max_mw_size;
1218 
1219 	tx_size = (unsigned int)mw_size / num_qps_mw;
1220 	qp_offset = tx_size * (qp_num / mw_count);
1221 
1222 	qp->tx_mw_size = tx_size;
1223 	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
1224 	if (!qp->tx_mw)
1225 		return -EINVAL;
1226 
1227 	qp->tx_mw_phys = mw_base + qp_offset;
1228 	if (!qp->tx_mw_phys)
1229 		return -EINVAL;
1230 
1231 	tx_size -= sizeof(struct ntb_rx_info);
1232 	qp->rx_info = qp->tx_mw + tx_size;
1233 
1234 	/* Due to housekeeping, there must be at least 2 buffs */
1235 	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
1236 	qp->tx_max_entry = tx_size / qp->tx_max_frame;
1237 
1238 	if (nt->debugfs_node_dir) {
1239 		char debugfs_name[8];
1240 
1241 		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
1242 		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
1243 						     nt->debugfs_node_dir);
1244 
1245 		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
1246 							qp->debugfs_dir, qp,
1247 							&ntb_qp_debugfs_stats);
1248 	} else {
1249 		qp->debugfs_dir = NULL;
1250 		qp->debugfs_stats = NULL;
1251 	}
1252 
1253 	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
1254 	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
1255 
1256 	spin_lock_init(&qp->ntb_rx_q_lock);
1257 	spin_lock_init(&qp->ntb_tx_free_q_lock);
1258 
1259 	INIT_LIST_HEAD(&qp->rx_post_q);
1260 	INIT_LIST_HEAD(&qp->rx_pend_q);
1261 	INIT_LIST_HEAD(&qp->rx_free_q);
1262 	INIT_LIST_HEAD(&qp->tx_free_q);
1263 
1264 	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
1265 		     (unsigned long)qp);
1266 
1267 	return 0;
1268 }
1269 
1270 static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
1271 {
1272 	struct ntb_transport_ctx *nt;
1273 	struct ntb_transport_mw *mw;
1274 	unsigned int mw_count, qp_count, spad_count, max_mw_count_for_spads;
1275 	u64 qp_bitmap;
1276 	int node;
1277 	int rc, i;
1278 
1279 	mw_count = ntb_peer_mw_count(ndev);
1280 
1281 	if (!ndev->ops->mw_set_trans) {
1282 		dev_err(&ndev->dev, "Inbound MW based NTB API is required\n");
1283 		return -EINVAL;
1284 	}
1285 
1286 	if (ntb_db_is_unsafe(ndev))
1287 		dev_dbg(&ndev->dev,
1288 			"doorbell is unsafe, proceed anyway...\n");
1289 	if (ntb_spad_is_unsafe(ndev))
1290 		dev_dbg(&ndev->dev,
1291 			"scratchpad is unsafe, proceed anyway...\n");
1292 
1293 	if (ntb_peer_port_count(ndev) != NTB_DEF_PEER_CNT)
1294 		dev_warn(&ndev->dev, "Multi-port NTB devices unsupported\n");
1295 
1296 	node = dev_to_node(&ndev->dev);
1297 
1298 	nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
1299 	if (!nt)
1300 		return -ENOMEM;
1301 
1302 	nt->ndev = ndev;
1303 
1304 	/*
1305 	 * If we are using MSI, and have at least one extra memory window,
1306 	 * we will reserve the last MW for the MSI window.
1307 	 */
1308 	if (use_msi && mw_count > 1) {
1309 		rc = ntb_msi_init(ndev, ntb_transport_msi_desc_changed);
1310 		if (!rc) {
1311 			mw_count -= 1;
1312 			nt->use_msi = true;
1313 		}
1314 	}
1315 
1316 	spad_count = ntb_spad_count(ndev);
1317 
1318 	/* Limit the MW's based on the availability of scratchpads */
1319 
1320 	if (spad_count < NTB_TRANSPORT_MIN_SPADS) {
1321 		nt->mw_count = 0;
1322 		rc = -EINVAL;
1323 		goto err;
1324 	}
1325 
1326 	max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2;
1327 	nt->mw_count = min(mw_count, max_mw_count_for_spads);
1328 
1329 	nt->msi_spad_offset = nt->mw_count * 2 + MW0_SZ_HIGH;
1330 
1331 	nt->mw_vec = kcalloc_node(mw_count, sizeof(*nt->mw_vec),
1332 				  GFP_KERNEL, node);
1333 	if (!nt->mw_vec) {
1334 		rc = -ENOMEM;
1335 		goto err;
1336 	}
1337 
1338 	for (i = 0; i < mw_count; i++) {
1339 		mw = &nt->mw_vec[i];
1340 
1341 		rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr,
1342 					  &mw->phys_size);
1343 		if (rc)
1344 			goto err1;
1345 
1346 		mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
1347 		if (!mw->vbase) {
1348 			rc = -ENOMEM;
1349 			goto err1;
1350 		}
1351 
1352 		mw->buff_size = 0;
1353 		mw->xlat_size = 0;
1354 		mw->virt_addr = NULL;
1355 		mw->dma_addr = 0;
1356 	}
1357 
1358 	qp_bitmap = ntb_db_valid_mask(ndev);
1359 
1360 	qp_count = ilog2(qp_bitmap);
1361 	if (nt->use_msi) {
1362 		qp_count -= 1;
1363 		nt->msi_db_mask = BIT_ULL(qp_count);
1364 		ntb_db_clear_mask(ndev, nt->msi_db_mask);
1365 	}
1366 
1367 	if (max_num_clients && max_num_clients < qp_count)
1368 		qp_count = max_num_clients;
1369 	else if (nt->mw_count < qp_count)
1370 		qp_count = nt->mw_count;
1371 
1372 	qp_bitmap &= BIT_ULL(qp_count) - 1;
1373 
1374 	nt->qp_count = qp_count;
1375 	nt->qp_bitmap = qp_bitmap;
1376 	nt->qp_bitmap_free = qp_bitmap;
1377 
1378 	nt->qp_vec = kcalloc_node(qp_count, sizeof(*nt->qp_vec),
1379 				  GFP_KERNEL, node);
1380 	if (!nt->qp_vec) {
1381 		rc = -ENOMEM;
1382 		goto err1;
1383 	}
1384 
1385 	if (nt_debugfs_dir) {
1386 		nt->debugfs_node_dir =
1387 			debugfs_create_dir(pci_name(ndev->pdev),
1388 					   nt_debugfs_dir);
1389 	}
1390 
1391 	for (i = 0; i < qp_count; i++) {
1392 		rc = ntb_transport_init_queue(nt, i);
1393 		if (rc)
1394 			goto err2;
1395 	}
1396 
1397 	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
1398 	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
1399 
1400 	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
1401 	if (rc)
1402 		goto err2;
1403 
1404 	INIT_LIST_HEAD(&nt->client_devs);
1405 	rc = ntb_bus_init(nt);
1406 	if (rc)
1407 		goto err3;
1408 
1409 	nt->link_is_up = false;
1410 	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
1411 	ntb_link_event(ndev);
1412 
1413 	return 0;
1414 
1415 err3:
1416 	ntb_clear_ctx(ndev);
1417 err2:
1418 	kfree(nt->qp_vec);
1419 err1:
1420 	while (i--) {
1421 		mw = &nt->mw_vec[i];
1422 		iounmap(mw->vbase);
1423 	}
1424 	kfree(nt->mw_vec);
1425 err:
1426 	kfree(nt);
1427 	return rc;
1428 }
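/*
 * Example of the QP sizing above (illustrative values): a device whose
 * doorbell valid mask is 0xffff yields ilog2(0xffff) = 15 candidate QPs.
 * With use_msi one doorbell is set aside, leaving 14 and
 * msi_db_mask = BIT_ULL(14).  If max_num_clients is 0 and only two MWs
 * remain usable, qp_count is clamped to mw_count = 2 and qp_bitmap ends up
 * as 0x3, so doorbells 0 and 1 drive the two queue pairs.
 */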
1429 
1430 static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
1431 {
1432 	struct ntb_transport_ctx *nt = ndev->ctx;
1433 	struct ntb_transport_qp *qp;
1434 	u64 qp_bitmap_alloc;
1435 	int i;
1436 
1437 	ntb_transport_link_cleanup(nt);
1438 	cancel_work_sync(&nt->link_cleanup);
1439 	cancel_delayed_work_sync(&nt->link_work);
1440 
1441 	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
1442 
1443 	/* verify that all the qp's are freed */
1444 	for (i = 0; i < nt->qp_count; i++) {
1445 		qp = &nt->qp_vec[i];
1446 		if (qp_bitmap_alloc & BIT_ULL(i))
1447 			ntb_transport_free_queue(qp);
1448 		debugfs_remove_recursive(qp->debugfs_dir);
1449 	}
1450 
1451 	ntb_link_disable(ndev);
1452 	ntb_clear_ctx(ndev);
1453 
1454 	ntb_bus_remove(nt);
1455 
1456 	for (i = nt->mw_count; i--; ) {
1457 		ntb_free_mw(nt, i);
1458 		iounmap(nt->mw_vec[i].vbase);
1459 	}
1460 
1461 	kfree(nt->qp_vec);
1462 	kfree(nt->mw_vec);
1463 	kfree(nt);
1464 }
1465 
1466 static void ntb_complete_rxc(struct ntb_transport_qp *qp)
1467 {
1468 	struct ntb_queue_entry *entry;
1469 	void *cb_data;
1470 	unsigned int len;
1471 	unsigned long irqflags;
1472 
1473 	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1474 
1475 	while (!list_empty(&qp->rx_post_q)) {
1476 		entry = list_first_entry(&qp->rx_post_q,
1477 					 struct ntb_queue_entry, entry);
1478 		if (!(entry->flags & DESC_DONE_FLAG))
1479 			break;
1480 
1481 		entry->rx_hdr->flags = 0;
1482 		iowrite32(entry->rx_index, &qp->rx_info->entry);
1483 
1484 		cb_data = entry->cb_data;
1485 		len = entry->len;
1486 
1487 		list_move_tail(&entry->entry, &qp->rx_free_q);
1488 
1489 		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1490 
1491 		if (qp->rx_handler && qp->client_ready)
1492 			qp->rx_handler(qp, qp->cb_data, cb_data, len);
1493 
1494 		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1495 	}
1496 
1497 	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1498 }
1499 
1500 static void ntb_rx_copy_callback(void *data,
1501 				 const struct dmaengine_result *res)
1502 {
1503 	struct ntb_queue_entry *entry = data;
1504 
1505 	/* we need to check DMA results if we are using DMA */
1506 	if (res) {
1507 		enum dmaengine_tx_result dma_err = res->result;
1508 
1509 		switch (dma_err) {
1510 		case DMA_TRANS_READ_FAILED:
1511 		case DMA_TRANS_WRITE_FAILED:
1512 			entry->errors++;
1513 			fallthrough;
1514 		case DMA_TRANS_ABORTED:
1515 		{
1516 			struct ntb_transport_qp *qp = entry->qp;
1517 			void *offset = qp->rx_buff + qp->rx_max_frame *
1518 					qp->rx_index;
1519 
1520 			ntb_memcpy_rx(entry, offset);
1521 			qp->rx_memcpy++;
1522 			return;
1523 		}
1524 
1525 		case DMA_TRANS_NOERROR:
1526 		default:
1527 			break;
1528 		}
1529 	}
1530 
1531 	entry->flags |= DESC_DONE_FLAG;
1532 
1533 	ntb_complete_rxc(entry->qp);
1534 }
1535 
1536 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
1537 {
1538 	void *buf = entry->buf;
1539 	size_t len = entry->len;
1540 
1541 	memcpy(buf, offset, len);
1542 
1543 	/* Ensure that the data is fully copied out before clearing the flag */
1544 	wmb();
1545 
1546 	ntb_rx_copy_callback(entry, NULL);
1547 }
1548 
1549 static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
1550 {
1551 	struct dma_async_tx_descriptor *txd;
1552 	struct ntb_transport_qp *qp = entry->qp;
1553 	struct dma_chan *chan = qp->rx_dma_chan;
1554 	struct dma_device *device;
1555 	size_t pay_off, buff_off, len;
1556 	struct dmaengine_unmap_data *unmap;
1557 	dma_cookie_t cookie;
1558 	void *buf = entry->buf;
1559 
1560 	len = entry->len;
1561 	device = chan->device;
1562 	pay_off = (size_t)offset & ~PAGE_MASK;
1563 	buff_off = (size_t)buf & ~PAGE_MASK;
1564 
1565 	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
1566 		goto err;
1567 
1568 	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
1569 	if (!unmap)
1570 		goto err;
1571 
1572 	unmap->len = len;
1573 	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
1574 				      pay_off, len, DMA_TO_DEVICE);
1575 	if (dma_mapping_error(device->dev, unmap->addr[0]))
1576 		goto err_get_unmap;
1577 
1578 	unmap->to_cnt = 1;
1579 
1580 	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
1581 				      buff_off, len, DMA_FROM_DEVICE);
1582 	if (dma_mapping_error(device->dev, unmap->addr[1]))
1583 		goto err_get_unmap;
1584 
1585 	unmap->from_cnt = 1;
1586 
1587 	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
1588 					     unmap->addr[0], len,
1589 					     DMA_PREP_INTERRUPT);
1590 	if (!txd)
1591 		goto err_get_unmap;
1592 
1593 	txd->callback_result = ntb_rx_copy_callback;
1594 	txd->callback_param = entry;
1595 	dma_set_unmap(txd, unmap);
1596 
1597 	cookie = dmaengine_submit(txd);
1598 	if (dma_submit_error(cookie))
1599 		goto err_set_unmap;
1600 
1601 	dmaengine_unmap_put(unmap);
1602 
1603 	qp->last_cookie = cookie;
1604 
1605 	qp->rx_async++;
1606 
1607 	return 0;
1608 
1609 err_set_unmap:
1610 	dmaengine_unmap_put(unmap);
1611 err_get_unmap:
1612 	dmaengine_unmap_put(unmap);
1613 err:
1614 	return -ENXIO;
1615 }
1616 
1617 static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
1618 {
1619 	struct ntb_transport_qp *qp = entry->qp;
1620 	struct dma_chan *chan = qp->rx_dma_chan;
1621 	int res;
1622 
1623 	if (!chan)
1624 		goto err;
1625 
1626 	if (entry->len < copy_bytes)
1627 		goto err;
1628 
1629 	res = ntb_async_rx_submit(entry, offset);
1630 	if (res < 0)
1631 		goto err;
1632 
1633 	if (!entry->retries)
1634 		qp->rx_async++;
1635 
1636 	return;
1637 
1638 err:
1639 	ntb_memcpy_rx(entry, offset);
1640 	qp->rx_memcpy++;
1641 }
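/*
 * The copy_bytes check above means frames shorter than copy_bytes
 * (1024 bytes by default) take the CPU memcpy path even when an RX DMA
 * channel is available; only larger frames are offloaded to the DMA
 * engine, where the setup overhead is worth paying.
 */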
1642 
1643 static int ntb_process_rxc(struct ntb_transport_qp *qp)
1644 {
1645 	struct ntb_payload_header *hdr;
1646 	struct ntb_queue_entry *entry;
1647 	void *offset;
1648 
1649 	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
1650 	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
1651 
1652 	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
1653 		qp->qp_num, hdr->ver, hdr->len, hdr->flags);
1654 
1655 	if (!(hdr->flags & DESC_DONE_FLAG)) {
1656 		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
1657 		qp->rx_ring_empty++;
1658 		return -EAGAIN;
1659 	}
1660 
1661 	if (hdr->flags & LINK_DOWN_FLAG) {
1662 		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
1663 		ntb_qp_link_down(qp);
1664 		hdr->flags = 0;
1665 		return -EAGAIN;
1666 	}
1667 
1668 	if (hdr->ver != (u32)qp->rx_pkts) {
1669 		dev_dbg(&qp->ndev->pdev->dev,
1670 			"version mismatch, expected %llu - got %u\n",
1671 			qp->rx_pkts, hdr->ver);
1672 		qp->rx_err_ver++;
1673 		return -EIO;
1674 	}
1675 
1676 	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
1677 	if (!entry) {
1678 		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
1679 		qp->rx_err_no_buf++;
1680 		return -EAGAIN;
1681 	}
1682 
1683 	entry->rx_hdr = hdr;
1684 	entry->rx_index = qp->rx_index;
1685 
1686 	if (hdr->len > entry->len) {
1687 		dev_dbg(&qp->ndev->pdev->dev,
1688 			"receive buffer overflow! Wanted %d got %d\n",
1689 			hdr->len, entry->len);
1690 		qp->rx_err_oflow++;
1691 
1692 		entry->len = -EIO;
1693 		entry->flags |= DESC_DONE_FLAG;
1694 
1695 		ntb_complete_rxc(qp);
1696 	} else {
1697 		dev_dbg(&qp->ndev->pdev->dev,
1698 			"RX OK index %u ver %u size %d into buf size %d\n",
1699 			qp->rx_index, hdr->ver, hdr->len, entry->len);
1700 
1701 		qp->rx_bytes += hdr->len;
1702 		qp->rx_pkts++;
1703 
1704 		entry->len = hdr->len;
1705 
1706 		ntb_async_rx(entry, offset);
1707 	}
1708 
1709 	qp->rx_index++;
1710 	qp->rx_index %= qp->rx_max_entry;
1711 
1712 	return 0;
1713 }
1714 
1715 static void ntb_transport_rxc_db(unsigned long data)
1716 {
1717 	struct ntb_transport_qp *qp = (void *)data;
1718 	int rc, i;
1719 
1720 	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
1721 		__func__, qp->qp_num);
1722 
1723 	/* Limit the number of packets processed in a single interrupt to
1724 	 * provide fairness to others
1725 	 */
1726 	for (i = 0; i < qp->rx_max_entry; i++) {
1727 		rc = ntb_process_rxc(qp);
1728 		if (rc)
1729 			break;
1730 	}
1731 
1732 	if (i && qp->rx_dma_chan)
1733 		dma_async_issue_pending(qp->rx_dma_chan);
1734 
1735 	if (i == qp->rx_max_entry) {
1736 		/* there is more work to do */
1737 		if (qp->active)
1738 			tasklet_schedule(&qp->rxc_db_work);
1739 	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
1740 		/* the doorbell bit is set: clear it */
1741 		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
1742 		/* ntb_db_read ensures ntb_db_clear write is committed */
1743 		ntb_db_read(qp->ndev);
1744 
1745 		/* an interrupt may have arrived between finishing
1746 		 * ntb_process_rxc and clearing the doorbell bit:
1747 		 * there might be some more work to do.
1748 		 */
1749 		if (qp->active)
1750 			tasklet_schedule(&qp->rxc_db_work);
1751 	}
1752 }
1753 
1754 static void ntb_tx_copy_callback(void *data,
1755 				 const struct dmaengine_result *res)
1756 {
1757 	struct ntb_queue_entry *entry = data;
1758 	struct ntb_transport_qp *qp = entry->qp;
1759 	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
1760 
1761 	/* we need to check DMA results if we are using DMA */
1762 	if (res) {
1763 		enum dmaengine_tx_result dma_err = res->result;
1764 
1765 		switch (dma_err) {
1766 		case DMA_TRANS_READ_FAILED:
1767 		case DMA_TRANS_WRITE_FAILED:
1768 			entry->errors++;
1769 			fallthrough;
1770 		case DMA_TRANS_ABORTED:
1771 		{
1772 			void __iomem *offset =
1773 				qp->tx_mw + qp->tx_max_frame *
1774 				entry->tx_index;
1775 
1776 			/* resubmit via CPU */
1777 			ntb_memcpy_tx(entry, offset);
1778 			qp->tx_memcpy++;
1779 			return;
1780 		}
1781 
1782 		case DMA_TRANS_NOERROR:
1783 		default:
1784 			break;
1785 		}
1786 	}
1787 
1788 	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
1789 
1790 	if (qp->use_msi)
1791 		ntb_msi_peer_trigger(qp->ndev, PIDX, &qp->peer_msi_desc);
1792 	else
1793 		ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
1794 
1795 	/* The entry length can only be zero if the packet is intended to be a
1796 	 * "link down" or similar.  Since no payload is being sent in these
1797 	 * cases, there is nothing to add to the completion queue.
1798 	 */
1799 	if (entry->len > 0) {
1800 		qp->tx_bytes += entry->len;
1801 
1802 		if (qp->tx_handler)
1803 			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
1804 				       entry->len);
1805 	}
1806 
1807 	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
1808 }
1809 
1810 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
1811 {
1812 #ifdef ARCH_HAS_NOCACHE_UACCESS
1813 	/*
1814 	 * Using non-temporal mov to improve performance on non-cached
1815 	 * writes, even though we aren't actually copying from user space.
1816 	 */
1817 	__copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
1818 #else
1819 	memcpy_toio(offset, entry->buf, entry->len);
1820 #endif
1821 
1822 	/* Ensure that the data is fully copied out before setting the flags */
1823 	wmb();
1824 
1825 	ntb_tx_copy_callback(entry, NULL);
1826 }
1827 
1828 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
1829 			       struct ntb_queue_entry *entry)
1830 {
1831 	struct dma_async_tx_descriptor *txd;
1832 	struct dma_chan *chan = qp->tx_dma_chan;
1833 	struct dma_device *device;
1834 	size_t len = entry->len;
1835 	void *buf = entry->buf;
1836 	size_t dest_off, buff_off;
1837 	struct dmaengine_unmap_data *unmap;
1838 	dma_addr_t dest;
1839 	dma_cookie_t cookie;
1840 
1841 	device = chan->device;
1842 	dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index;
1843 	buff_off = (size_t)buf & ~PAGE_MASK;
1844 	dest_off = (size_t)dest & ~PAGE_MASK;
1845 
1846 	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
1847 		goto err;
1848 
1849 	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
1850 	if (!unmap)
1851 		goto err;
1852 
1853 	unmap->len = len;
1854 	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
1855 				      buff_off, len, DMA_TO_DEVICE);
1856 	if (dma_mapping_error(device->dev, unmap->addr[0]))
1857 		goto err_get_unmap;
1858 
1859 	unmap->to_cnt = 1;
1860 
1861 	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
1862 					     DMA_PREP_INTERRUPT);
1863 	if (!txd)
1864 		goto err_get_unmap;
1865 
1866 	txd->callback_result = ntb_tx_copy_callback;
1867 	txd->callback_param = entry;
1868 	dma_set_unmap(txd, unmap);
1869 
1870 	cookie = dmaengine_submit(txd);
1871 	if (dma_submit_error(cookie))
1872 		goto err_set_unmap;
1873 
1874 	dmaengine_unmap_put(unmap);
1875 
1876 	dma_async_issue_pending(chan);
1877 
1878 	return 0;
1879 err_set_unmap:
1880 	dmaengine_unmap_put(unmap);
1881 err_get_unmap:
1882 	dmaengine_unmap_put(unmap);
1883 err:
1884 	return -ENXIO;
1885 }
1886 
1887 static void ntb_async_tx(struct ntb_transport_qp *qp,
1888 			 struct ntb_queue_entry *entry)
1889 {
1890 	struct ntb_payload_header __iomem *hdr;
1891 	struct dma_chan *chan = qp->tx_dma_chan;
1892 	void __iomem *offset;
1893 	int res;
1894 
1895 	entry->tx_index = qp->tx_index;
1896 	offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
1897 	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1898 	entry->tx_hdr = hdr;
1899 
1900 	iowrite32(entry->len, &hdr->len);
1901 	iowrite32((u32)qp->tx_pkts, &hdr->ver);
1902 
1903 	if (!chan)
1904 		goto err;
1905 
1906 	if (entry->len < copy_bytes)
1907 		goto err;
1908 
1909 	res = ntb_async_tx_submit(qp, entry);
1910 	if (res < 0)
1911 		goto err;
1912 
1913 	if (!entry->retries)
1914 		qp->tx_async++;
1915 
1916 	return;
1917 
1918 err:
1919 	ntb_memcpy_tx(entry, offset);
1920 	qp->tx_memcpy++;
1921 }
1922 
1923 static int ntb_process_tx(struct ntb_transport_qp *qp,
1924 			  struct ntb_queue_entry *entry)
1925 {
1926 	if (!ntb_transport_tx_free_entry(qp)) {
1927 		qp->tx_ring_full++;
1928 		return -EAGAIN;
1929 	}
1930 
1931 	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
1932 		if (qp->tx_handler)
1933 			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
1934 
1935 		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1936 			     &qp->tx_free_q);
1937 		return 0;
1938 	}
1939 
1940 	ntb_async_tx(qp, entry);
1941 
1942 	qp->tx_index++;
1943 	qp->tx_index %= qp->tx_max_entry;
1944 
1945 	qp->tx_pkts++;
1946 
1947 	return 0;
1948 }
1949 
1950 static void ntb_send_link_down(struct ntb_transport_qp *qp)
1951 {
1952 	struct pci_dev *pdev = qp->ndev->pdev;
1953 	struct ntb_queue_entry *entry;
1954 	int i, rc;
1955 
1956 	if (!qp->link_is_up)
1957 		return;
1958 
1959 	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);
1960 
1961 	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
1962 		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1963 		if (entry)
1964 			break;
1965 		msleep(100);
1966 	}
1967 
1968 	if (!entry)
1969 		return;
1970 
1971 	entry->cb_data = NULL;
1972 	entry->buf = NULL;
1973 	entry->len = 0;
1974 	entry->flags = LINK_DOWN_FLAG;
1975 
1976 	rc = ntb_process_tx(qp, entry);
1977 	if (rc)
1978 		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
1979 			qp->qp_num);
1980 
1981 	ntb_qp_link_down_reset(qp);
1982 }
1983 
1984 static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
1985 {
1986 	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
1987 }
1988 
1989 /**
1990  * ntb_transport_create_queue - Create a new NTB transport layer queue
1991  * @data: pointer for callback data
1992  * @client_dev: &struct device pointer
1993  * @handlers: pointer to various ntb queue (callback) handlers
1994  *
1995  * Create a new NTB transport layer queue and provide the queue with a callback
1996  * routine for both transmit and receive.  The receive callback routine will be
1997  * used to pass up data when the transport has received it on the queue.  The
1998  * transmit callback routine will be called when the transport has completed the
1999  * transmission of the data on the queue and the data is ready to be freed.
2000  *
2001  * RETURNS: pointer to newly created ntb_queue, NULL on error.
2002  */
2003 struct ntb_transport_qp *
2004 ntb_transport_create_queue(void *data, struct device *client_dev,
2005 			   const struct ntb_queue_handlers *handlers)
2006 {
2007 	struct ntb_dev *ndev;
2008 	struct pci_dev *pdev;
2009 	struct ntb_transport_ctx *nt;
2010 	struct ntb_queue_entry *entry;
2011 	struct ntb_transport_qp *qp;
2012 	u64 qp_bit;
2013 	unsigned int free_queue;
2014 	dma_cap_mask_t dma_mask;
2015 	int node;
2016 	int i;
2017 
2018 	ndev = dev_ntb(client_dev->parent);
2019 	pdev = ndev->pdev;
2020 	nt = ndev->ctx;
2021 
2022 	node = dev_to_node(&ndev->dev);
2023 
2024 	free_queue = ffs(nt->qp_bitmap_free);
2025 	if (!free_queue)
2026 		goto err;
2027 
2028 	/* decrement free_queue to make it zero based */
2029 	free_queue--;
2030 
2031 	qp = &nt->qp_vec[free_queue];
2032 	qp_bit = BIT_ULL(qp->qp_num);
2033 
2034 	nt->qp_bitmap_free &= ~qp_bit;
2035 
2036 	qp->cb_data = data;
2037 	qp->rx_handler = handlers->rx_handler;
2038 	qp->tx_handler = handlers->tx_handler;
2039 	qp->event_handler = handlers->event_handler;
2040 
2041 	dma_cap_zero(dma_mask);
2042 	dma_cap_set(DMA_MEMCPY, dma_mask);
2043 
2044 	if (use_dma) {
2045 		qp->tx_dma_chan =
2046 			dma_request_channel(dma_mask, ntb_dma_filter_fn,
2047 					    (void *)(unsigned long)node);
2048 		if (!qp->tx_dma_chan)
2049 			dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");
2050 
2051 		qp->rx_dma_chan =
2052 			dma_request_channel(dma_mask, ntb_dma_filter_fn,
2053 					    (void *)(unsigned long)node);
2054 		if (!qp->rx_dma_chan)
2055 			dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
2056 	} else {
2057 		qp->tx_dma_chan = NULL;
2058 		qp->rx_dma_chan = NULL;
2059 	}
2060 
2061 	qp->tx_mw_dma_addr = 0;
2062 	if (qp->tx_dma_chan) {
2063 		qp->tx_mw_dma_addr =
2064 			dma_map_resource(qp->tx_dma_chan->device->dev,
2065 					 qp->tx_mw_phys, qp->tx_mw_size,
2066 					 DMA_FROM_DEVICE, 0);
2067 		if (dma_mapping_error(qp->tx_dma_chan->device->dev,
2068 				      qp->tx_mw_dma_addr)) {
2069 			qp->tx_mw_dma_addr = 0;
2070 			goto err1;
2071 		}
2072 	}
2073 
2074 	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
2075 		qp->tx_dma_chan ? "DMA" : "CPU");
2076 
2077 	dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
2078 		qp->rx_dma_chan ? "DMA" : "CPU");
2079 
2080 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
2081 		entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
2082 		if (!entry)
2083 			goto err1;
2084 
2085 		entry->qp = qp;
2086 		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
2087 			     &qp->rx_free_q);
2088 	}
2089 	qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;
2090 
2091 	for (i = 0; i < qp->tx_max_entry; i++) {
2092 		entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
2093 		if (!entry)
2094 			goto err2;
2095 
2096 		entry->qp = qp;
2097 		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
2098 			     &qp->tx_free_q);
2099 	}
2100 
2101 	ntb_db_clear(qp->ndev, qp_bit);
2102 	ntb_db_clear_mask(qp->ndev, qp_bit);
2103 
2104 	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
2105 
2106 	return qp;
2107 
2108 err2:
2109 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
2110 		kfree(entry);
2111 err1:
2112 	qp->rx_alloc_entry = 0;
2113 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
2114 		kfree(entry);
2115 	if (qp->tx_mw_dma_addr)
2116 		dma_unmap_resource(qp->tx_dma_chan->device->dev,
2117 				   qp->tx_mw_dma_addr, qp->tx_mw_size,
2118 				   DMA_FROM_DEVICE, 0);
2119 	if (qp->tx_dma_chan)
2120 		dma_release_channel(qp->tx_dma_chan);
2121 	if (qp->rx_dma_chan)
2122 		dma_release_channel(qp->rx_dma_chan);
2123 	nt->qp_bitmap_free |= qp_bit;
2124 err:
2125 	return NULL;
2126 }
2127 EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
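/*
 * A minimal client-side sketch of queue creation (hedged: my_ctx, my_probe,
 * my_rx_handler, my_tx_handler and my_event_handler are illustrative names,
 * not part of this driver).  The handler fields mirror the assignments made
 * in ntb_transport_create_queue() above; a NULL return means no free qp was
 * available or an allocation failed:
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler	= my_rx_handler,
 *		.tx_handler	= my_tx_handler,
 *		.event_handler	= my_event_handler,
 *	};
 *
 *	static int my_probe(struct device *client_dev)
 *	{
 *		struct ntb_transport_qp *qp;
 *
 *		qp = ntb_transport_create_queue(my_ctx, client_dev,
 *						&my_handlers);
 *		if (!qp)
 *			return -EBUSY;
 *
 *		return 0;
 *	}
 */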
2128 
2129 /**
2130  * ntb_transport_free_queue - Frees NTB transport queue
2131  * @qp: NTB queue to be freed
2132  *
2133  * Frees NTB transport queue
2134  */
2135 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
2136 {
2137 	struct pci_dev *pdev;
2138 	struct ntb_queue_entry *entry;
2139 	u64 qp_bit;
2140 
2141 	if (!qp)
2142 		return;
2143 
2144 	pdev = qp->ndev->pdev;
2145 
2146 	qp->active = false;
2147 
2148 	if (qp->tx_dma_chan) {
2149 		struct dma_chan *chan = qp->tx_dma_chan;
2150 		/* Putting the dma_chan to NULL will force any new traffic to be
2151 		/* Setting the dma_chan to NULL will force any new traffic to be
2152 		 * processed by the CPU instead of the DMA engine
2153 		qp->tx_dma_chan = NULL;
2154 
2155 		/* Try to be nice and wait for any queued DMA engine
2156 		 * transactions to process before smashing it with a rock
2157 		 */
2158 		dma_sync_wait(chan, qp->last_cookie);
2159 		dmaengine_terminate_all(chan);
2160 
2161 		dma_unmap_resource(chan->device->dev,
2162 				   qp->tx_mw_dma_addr, qp->tx_mw_size,
2163 				   DMA_FROM_DEVICE, 0);
2164 
2165 		dma_release_channel(chan);
2166 	}
2167 
2168 	if (qp->rx_dma_chan) {
2169 		struct dma_chan *chan = qp->rx_dma_chan;
2170 		/* Putting the dma_chan to NULL will force any new traffic to be
2171 		/* Setting the dma_chan to NULL will force any new traffic to be
2172 		 * processed by the CPU instead of the DMA engine
2173 		qp->rx_dma_chan = NULL;
2174 
2175 		/* Try to be nice and wait for any queued DMA engine
2176 		 * transactions to process before smashing it with a rock
2177 		 */
2178 		dma_sync_wait(chan, qp->last_cookie);
2179 		dmaengine_terminate_all(chan);
2180 		dma_release_channel(chan);
2181 	}
2182 
2183 	qp_bit = BIT_ULL(qp->qp_num);
2184 
2185 	ntb_db_set_mask(qp->ndev, qp_bit);
2186 	tasklet_kill(&qp->rxc_db_work);
2187 
2188 	cancel_delayed_work_sync(&qp->link_work);
2189 
2190 	qp->cb_data = NULL;
2191 	qp->rx_handler = NULL;
2192 	qp->tx_handler = NULL;
2193 	qp->event_handler = NULL;
2194 
2195 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
2196 		kfree(entry);
2197 
2198 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
2199 		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
2200 		kfree(entry);
2201 	}
2202 
2203 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
2204 		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
2205 		kfree(entry);
2206 	}
2207 
2208 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
2209 		kfree(entry);
2210 
2211 	qp->transport->qp_bitmap_free |= qp_bit;
2212 
2213 	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
2214 }
2215 EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
2216 
2217 /**
2218  * ntb_transport_rx_remove - Dequeues enqueued rx packet
2219  * @qp: NTB transport layer queue the buffer is to be dequeued from
2220  * @len: pointer to variable into which the dequeued buffer's length is written
2221  *
2222  * Dequeues unused buffers from receive queue.  Should only be used during
2223  * shutdown of qp.
2224  *
2225  * RETURNS: NULL on error, or a pointer to the dequeued buffer on success.
2226  */
2227 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
2228 {
2229 	struct ntb_queue_entry *entry;
2230 	void *buf;
2231 
2232 	if (!qp || qp->client_ready)
2233 		return NULL;
2234 
2235 	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
2236 	if (!entry)
2237 		return NULL;
2238 
2239 	buf = entry->cb_data;
2240 	*len = entry->len;
2241 
2242 	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
2243 
2244 	return buf;
2245 }
2246 EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
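/*
 * A hedged teardown sketch using the helper above (my_shutdown and
 * my_free_rx_buf are illustrative client names).  Receive buffers that were
 * posted with ntb_transport_rx_enqueue() but never filled are reclaimed only
 * after the client has marked itself not ready via ntb_transport_link_down():
 *
 *	static void my_shutdown(struct ntb_transport_qp *qp)
 *	{
 *		unsigned int len;
 *		void *buf;
 *
 *		ntb_transport_link_down(qp);
 *
 *		while ((buf = ntb_transport_rx_remove(qp, &len)))
 *			my_free_rx_buf(buf, len);
 *
 *		ntb_transport_free_queue(qp);
 *	}
 */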
2247 
2248 /**
2249  * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
2250  * @qp: NTB transport layer queue the entry is to be enqueued on
2251  * @cb: per buffer pointer for callback function to use
2252  * @data: pointer to data buffer that incoming packets will be copied into
2253  * @len: length of the data buffer
2254  *
2255  * Enqueue a new receive buffer onto the transport queue into which an NTB
2256  * payload can be received.
2257  *
2258  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2259  */
2260 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
2261 			     unsigned int len)
2262 {
2263 	struct ntb_queue_entry *entry;
2264 
2265 	if (!qp)
2266 		return -EINVAL;
2267 
2268 	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
2269 	if (!entry)
2270 		return -ENOMEM;
2271 
2272 	entry->cb_data = cb;
2273 	entry->buf = data;
2274 	entry->len = len;
2275 	entry->flags = 0;
2276 	entry->retries = 0;
2277 	entry->errors = 0;
2278 	entry->rx_index = 0;
2279 
2280 	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
2281 
2282 	if (qp->active)
2283 		tasklet_schedule(&qp->rxc_db_work);
2284 
2285 	return 0;
2286 }
2287 EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
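/*
 * A minimal sketch of pre-posting receive buffers (my_post_rx_bufs and
 * RX_RING_DEPTH are illustrative, not part of this driver).  The cb pointer
 * is handed back untouched to the client's rx_handler, so passing the buffer
 * itself is a common choice:
 *
 *	static int my_post_rx_bufs(struct ntb_transport_qp *qp, size_t buf_len)
 *	{
 *		unsigned int i;
 *		int rc;
 *
 *		for (i = 0; i < RX_RING_DEPTH; i++) {
 *			void *buf = kmalloc(buf_len, GFP_KERNEL);
 *
 *			if (!buf)
 *				return -ENOMEM;
 *
 *			rc = ntb_transport_rx_enqueue(qp, buf, buf, buf_len);
 *			if (rc) {
 *				kfree(buf);
 *				return rc;
 *			}
 *		}
 *
 *		return 0;
 *	}
 */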
2288 
2289 /**
2290  * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
2291  * @qp: NTB transport layer queue the entry is to be enqueued on
2292  * @cb: per buffer pointer for callback function to use
2293  * @data: pointer to data buffer that will be sent
2294  * @len: length of the data buffer
2295  *
2296  * Enqueue a new transmit buffer onto the transport queue from which an NTB
2297  * payload will be transmitted.  This assumes that a lock is being held to
2298  * serialize access to the qp.
2299  *
2300  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2301  */
2302 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
2303 			     unsigned int len)
2304 {
2305 	struct ntb_queue_entry *entry;
2306 	int rc;
2307 
2308 	if (!qp || !len)
2309 		return -EINVAL;
2310 
2311 	/* If the qp link is down already, just ignore. */
2312 	if (!qp->link_is_up)
2313 		return 0;
2314 
2315 	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
2316 	if (!entry) {
2317 		qp->tx_err_no_buf++;
2318 		return -EBUSY;
2319 	}
2320 
2321 	entry->cb_data = cb;
2322 	entry->buf = data;
2323 	entry->len = len;
2324 	entry->flags = 0;
2325 	entry->errors = 0;
2326 	entry->retries = 0;
2327 	entry->tx_index = 0;
2328 
2329 	rc = ntb_process_tx(qp, entry);
2330 	if (rc)
2331 		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
2332 			     &qp->tx_free_q);
2333 
2334 	return rc;
2335 }
2336 EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
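/*
 * A hedged transmit sketch (my_tx is an illustrative client helper).  -EBUSY
 * and -EAGAIN both indicate the transmit ring is out of space, so the caller
 * backs off and retries, typically from its tx_handler completion callback;
 * note that a call made while the qp link is down returns 0 without queueing
 * anything, as seen above:
 *
 *	static int my_tx(struct ntb_transport_qp *qp, void *buf,
 *			 unsigned int len)
 *	{
 *		if (len > ntb_transport_max_size(qp))
 *			return -EINVAL;
 *
 *		if (!ntb_transport_tx_free_entry(qp))
 *			return -EBUSY;
 *
 *		return ntb_transport_tx_enqueue(qp, buf, buf, len);
 *	}
 */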
2337 
2338 /**
2339  * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
2340  * @qp: NTB transport layer queue to be enabled
2341  *
2342  * Notify NTB transport layer of client readiness to use queue
2343  */
2344 void ntb_transport_link_up(struct ntb_transport_qp *qp)
2345 {
2346 	if (!qp)
2347 		return;
2348 
2349 	qp->client_ready = true;
2350 
2351 	if (qp->transport->link_is_up)
2352 		schedule_delayed_work(&qp->link_work, 0);
2353 }
2354 EXPORT_SYMBOL_GPL(ntb_transport_link_up);
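/*
 * A minimal bring-up sketch, assuming the hypothetical my_post_rx_bufs()
 * helper sketched earlier: receive buffers are posted first, and only then is
 * the transport told that the client is ready, which lets the qp link
 * negotiation proceed:
 *
 *	rc = my_post_rx_bufs(qp, ntb_transport_max_size(qp));
 *	if (rc)
 *		return rc;
 *
 *	ntb_transport_link_up(qp);
 */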
2355 
2356 /**
2357  * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
2358  * @qp: NTB transport layer queue to be disabled
2359  *
2360  * Notify NTB transport layer of client's desire to no longer receive data on
2361  * transport queue specified.  It is the client's responsibility to ensure all
2362  * entries on queue are purged or otherwise handled appropriately.
2363  */
2364 void ntb_transport_link_down(struct ntb_transport_qp *qp)
2365 {
2366 	int val;
2367 
2368 	if (!qp)
2369 		return;
2370 
2371 	qp->client_ready = false;
2372 
2373 	val = ntb_spad_read(qp->ndev, QP_LINKS);
2374 
2375 	ntb_peer_spad_write(qp->ndev, PIDX, QP_LINKS, val & ~BIT(qp->qp_num));
2376 
2377 	if (qp->link_is_up)
2378 		ntb_send_link_down(qp);
2379 	else
2380 		cancel_delayed_work_sync(&qp->link_work);
2381 }
2382 EXPORT_SYMBOL_GPL(ntb_transport_link_down);
2383 
2384 /**
2385  * ntb_transport_link_query - Query transport link state
2386  * @qp: NTB transport layer queue to be queried
2387  *
2388  * Query connectivity to the remote system of the NTB transport queue
2389  *
2390  * RETURNS: true for link up or false for link down
2391  */
2392 bool ntb_transport_link_query(struct ntb_transport_qp *qp)
2393 {
2394 	if (!qp)
2395 		return false;
2396 
2397 	return qp->link_is_up;
2398 }
2399 EXPORT_SYMBOL_GPL(ntb_transport_link_query);
2400 
2401 /**
2402  * ntb_transport_qp_num - Query the qp number
2403  * @qp: NTB transport layer queue to be queried
2404  *
2405  * Query qp number of the NTB transport queue
2406  *
2407  * RETURNS: a zero based number specifying the qp number
2408  */
2409 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
2410 {
2411 	if (!qp)
2412 		return 0;
2413 
2414 	return qp->qp_num;
2415 }
2416 EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
2417 
2418 /**
2419  * ntb_transport_max_size - Query the max payload size of a qp
2420  * @qp: NTB transport layer queue to be queried
2421  *
2422  * Query the maximum payload size permissible on the given qp
2423  *
2424  * RETURNS: the max payload size of a qp
2425  */
2426 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
2427 {
2428 	unsigned int max_size;
2429 	unsigned int copy_align;
2430 	struct dma_chan *rx_chan, *tx_chan;
2431 
2432 	if (!qp)
2433 		return 0;
2434 
2435 	rx_chan = qp->rx_dma_chan;
2436 	tx_chan = qp->tx_dma_chan;
2437 
2438 	copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
2439 			 tx_chan ? tx_chan->device->copy_align : 0);
2440 
2441 	/* If DMA engine usage is possible, try to find the max size for that */
2442 	max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
2443 	max_size = round_down(max_size, 1 << copy_align);
2444 
2445 	return max_size;
2446 }
2447 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
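/*
 * A minimal sketch of sizing client buffers from the value returned above
 * (my_post_rx_bufs is the hypothetical helper sketched earlier).  Because the
 * result already accounts for the payload header and any DMA copy alignment,
 * buffers of exactly this size can hold any frame the peer may send, assuming
 * both sides are configured with the same MTU:
 *
 *	size_t buf_len = ntb_transport_max_size(qp);
 *	int rc = my_post_rx_bufs(qp, buf_len);
 */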
2448 
2449 unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
2450 {
2451 	unsigned int head = qp->tx_index;
2452 	unsigned int tail = qp->remote_rx_info->entry;
2453 
2454 	return tail >= head ? tail - head : qp->tx_max_entry + tail - head;
2455 }
2456 EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
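/*
 * A short worked example of the ring arithmetic above, with assumed values:
 * if tx_max_entry is 8, the local head (tx_index) is 6 and the peer's tail
 * (remote_rx_info->entry) is 2, then tail < head and the wrapped form gives
 * 8 + 2 - 6 = 4 free entries; once head wraps back below tail the simple
 * tail - head form applies.
 */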
2457 
2458 static void ntb_transport_doorbell_callback(void *data, int vector)
2459 {
2460 	struct ntb_transport_ctx *nt = data;
2461 	struct ntb_transport_qp *qp;
2462 	u64 db_bits;
2463 	unsigned int qp_num;
2464 
2465 	if (ntb_db_read(nt->ndev) & nt->msi_db_mask) {
2466 		ntb_transport_msi_peer_desc_changed(nt);
2467 		ntb_db_clear(nt->ndev, nt->msi_db_mask);
2468 	}
2469 
2470 	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
2471 		   ntb_db_vector_mask(nt->ndev, vector));
2472 
2473 	while (db_bits) {
2474 		qp_num = __ffs(db_bits);
2475 		qp = &nt->qp_vec[qp_num];
2476 
2477 		if (qp->active)
2478 			tasklet_schedule(&qp->rxc_db_work);
2479 
2480 		db_bits &= ~BIT_ULL(qp_num);
2481 	}
2482 }
2483 
2484 static const struct ntb_ctx_ops ntb_transport_ops = {
2485 	.link_event = ntb_transport_event_callback,
2486 	.db_event = ntb_transport_doorbell_callback,
2487 };
2488 
2489 static struct ntb_client ntb_transport_client = {
2490 	.ops = {
2491 		.probe = ntb_transport_probe,
2492 		.remove = ntb_transport_free,
2493 	},
2494 };
2495 
2496 static int __init ntb_transport_init(void)
2497 {
2498 	int rc;
2499 
2500 	pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);
2501 
2502 	if (debugfs_initialized())
2503 		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
2504 
2505 	rc = bus_register(&ntb_transport_bus);
2506 	if (rc)
2507 		goto err_bus;
2508 
2509 	rc = ntb_register_client(&ntb_transport_client);
2510 	if (rc)
2511 		goto err_client;
2512 
2513 	return 0;
2514 
2515 err_client:
2516 	bus_unregister(&ntb_transport_bus);
2517 err_bus:
2518 	debugfs_remove_recursive(nt_debugfs_dir);
2519 	return rc;
2520 }
2521 module_init(ntb_transport_init);
2522 
2523 static void __exit ntb_transport_exit(void)
2524 {
2525 	ntb_unregister_client(&ntb_transport_client);
2526 	bus_unregister(&ntb_transport_bus);
2527 	debugfs_remove_recursive(nt_debugfs_dir);
2528 }
2529 module_exit(ntb_transport_exit);
2530