// SPDX-License-Identifier: GPL-2.0
/*
 * Endpoint Function Driver to implement Non-Transparent Bridge functionality
 * between PCI RC and EP
 *
 * Copyright (C) 2020 Texas Instruments
 * Copyright (C) 2022 NXP
 *
 * Based on pci-epf-ntb.c
 * Author: Frank Li <Frank.Li@nxp.com>
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

/*
 * +------------+         +---------------------------------------+
 * |            |         |                                       |
 * +------------+         |                        +--------------+
 * | NTB        |         |                        | NTB          |
 * | NetDev     |         |                        | NetDev       |
 * +------------+         |                        +--------------+
 * | NTB        |         |                        | NTB          |
 * | Transfer   |         |                        | Transfer     |
 * +------------+         |                        +--------------+
 * |            |         |                        |              |
 * |  PCI NTB   |         |                        |              |
 * |    EPF     |         |                        |              |
 * |   Driver   |         |                        | PCI Virtual  |
 * |            |         +---------------+        | NTB Driver   |
 * |            |         | PCI EP NTB    |<------>|              |
 * |            |         |  FN Driver    |        |              |
 * +------------+         +---------------+        +--------------+
 * |            |         |               |        |              |
 * |  PCI Bus   | <-----> |  PCI EP Bus   |        |  Virtual PCI |
 * |            |  PCI    |               |        |     Bus      |
 * +------------+         +---------------+--------+--------------+
 *  PCIe Root Port                      PCI EP
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/ntb.h>

static struct workqueue_struct *kpcintb_workqueue;

#define COMMAND_CONFIGURE_DOORBELL	1
#define COMMAND_TEARDOWN_DOORBELL	2
#define COMMAND_CONFIGURE_MW		3
#define COMMAND_TEARDOWN_MW		4
#define COMMAND_LINK_UP			5
#define COMMAND_LINK_DOWN		6

#define COMMAND_STATUS_OK		1
#define COMMAND_STATUS_ERROR		2

#define LINK_STATUS_UP			BIT(0)

#define SPAD_COUNT			64
#define DB_COUNT			4
#define NTB_MW_OFFSET			2
#define DB_COUNT_MASK			GENMASK(15, 0)
#define MSIX_ENABLE			BIT(16)
#define MAX_DB_COUNT			32
#define MAX_MW				4
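
/*
 * The HOST-side NTB driver programs this function through the control
 * region in BAR_CONFIG: it writes an argument and one of the COMMAND_*
 * values above, then polls command_status for COMMAND_STATUS_OK or
 * COMMAND_STATUS_ERROR. epf_ntb_cmd_handler() picks commands up from its
 * 5 ms polling loop.
 */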

enum epf_ntb_bar {
	BAR_CONFIG,
	BAR_DB,
	BAR_MW0,
	BAR_MW1,
	BAR_MW2,
};

/*
 * +--------------------------------------------------+ Base
 * |                                                  |
 * |                                                  |
 * |                                                  |
 * |          Common Control Register                 |
 * |                                                  |
 * |                                                  |
 * |                                                  |
 * +-----------------------+--------------------------+ Base+spad_offset
 * |                       |                          |
 * |    Peer Spad Space    |       Spad Space         |
 * |                       |                          |
 * |                       |                          |
 * +-----------------------+--------------------------+ Base+spad_offset
 * |                       |                          | +spad_count * 4
 * |                       |                          |
 * |      Spad Space       |    Peer Spad Space       |
 * |                       |                          |
 * +-----------------------+--------------------------+
 *       Virtual PCI             PCIe Endpoint
 *       NTB Driver               NTB Driver
 */
struct epf_ntb_ctrl {
	u32 command;
	u32 argument;
	u16 command_status;
	u16 link_status;
	u32 topology;
	u64 addr;
	u64 size;
	u32 num_mws;
	u32 reserved;
	u32 spad_offset;
	u32 spad_count;
	u32 db_entry_size;
	u32 db_data[MAX_DB_COUNT];
	u32 db_offset[MAX_DB_COUNT];
} __packed;

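/**
 * struct epf_ntb - per-function state of the vNTB endpoint function
 * @ntb: NTB device registered on the virtual PCI bus of the endpoint
 * @epf: endpoint function this NTB instance is bound to
 * @group: configfs group exposing the NTB attributes below
 * @num_mws: number of memory windows ("num_mws" configfs attribute)
 * @db_count: number of doorbells ("db_count" configfs attribute)
 * @spad_count: number of scratchpad registers ("spad_count" configfs attribute)
 * @mws_size: size of each memory window ("mw1".."mw4" configfs attributes)
 * @db: bitmap of doorbells received from the HOST
 * @vbus_number: bus number to use for the virtual PCI bus
 * @vntb_pid: device ID of the virtual NTB PCI device
 * @vntb_vid: vendor ID of the virtual NTB PCI device
 * @linkup: true once the HOST has requested link up
 * @spad_size: total size of the scratchpad region
 * @epf_ntb_bar: BAR used for each of the NTB constructs
 * @reg: kernel address of the config + scratchpad region
 * @epf_db: kernel address of the doorbell BAR backing memory
 * @vpci_mw_phy: physical addresses of the outbound memory windows
 * @vpci_mw_addr: kernel addresses of the outbound memory windows
 * @cmd_handler: delayed work that polls for commands from the HOST
 */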
struct epf_ntb {
	struct ntb_dev ntb;
	struct pci_epf *epf;
	struct config_group group;

	u32 num_mws;
	u32 db_count;
	u32 spad_count;
	u64 mws_size[MAX_MW];
	u64 db;
	u32 vbus_number;
	u16 vntb_pid;
	u16 vntb_vid;

	bool linkup;
	u32 spad_size;

	enum pci_barno epf_ntb_bar[6];

	struct epf_ntb_ctrl *reg;

	u32 *epf_db;

	phys_addr_t vpci_mw_phy[MAX_MW];
	void __iomem *vpci_mw_addr[MAX_MW];

	struct delayed_work cmd_handler;
};

#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
#define ntb_ndev(__ntb) container_of(__ntb, struct epf_ntb, ntb)

static struct pci_epf_header epf_ntb_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code	= PCI_BASE_CLASS_MEMORY,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};
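
/*
 * PCI_ANY_ID above is only a placeholder: the vendor and device IDs of the
 * physical endpoint function are normally filled in through the generic EPF
 * configfs attributes before the function is bound to a controller.
 */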

/**
 * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host (VHOST)
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @link_up: true or false indicating Link is UP or Down
 *
 * Once the NTB function on the HOST invokes ntb_link_enable(), this NTB
 * function driver triggers a link event to the VHOST.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
{
	if (link_up)
		ntb->reg->link_status |= LINK_STATUS_UP;
	else
		ntb->reg->link_status &= ~LINK_STATUS_UP;

	ntb_link_event(&ntb->ntb);
	return 0;
}

/**
 * epf_ntb_configure_mw() - Configure the Outbound Address Space for VHOST
 *   to access the memory window of the HOST
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @mw: Index of the memory window (either 0, 1, 2 or 3)
 *
 *                          EP Outbound Window
 * +--------+              +-----------+
 * |        |              |           |
 * |        |              |           |
 * |        |              |           |
 * |        |              |           |
 * |        |              +-----------+
 * | Virtual|              | Memory Win|
 * | NTB    | -----------> |           |
 * | Driver |              |           |
 * |        |              +-----------+
 * |        |              |           |
 * |        |              |           |
 * +--------+              +-----------+
 *  VHOST                   PCI EP
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw)
{
	phys_addr_t phys_addr;
	u8 func_no, vfunc_no;
	u64 addr, size;
	int ret = 0;

	phys_addr = ntb->vpci_mw_phy[mw];
	addr = ntb->reg->addr;
	size = ntb->reg->size;

	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;

	ret = pci_epc_map_addr(ntb->epf->epc, func_no, vfunc_no, phys_addr, addr, size);
	if (ret)
		dev_err(&ntb->epf->epc->dev,
			"Failed to map memory window %d address\n", mw);
	return ret;
}

/**
 * epf_ntb_teardown_mw() - Teardown the configured OB ATU
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @mw: Index of the memory window (either 0, 1, 2 or 3)
 *
 * Teardown the OB ATU entry configured in epf_ntb_configure_mw() using
 * pci_epc_unmap_addr().
 */
static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw)
{
	pci_epc_unmap_addr(ntb->epf->epc,
			   ntb->epf->func_no,
			   ntb->epf->vfunc_no,
			   ntb->vpci_mw_phy[mw]);
}

/**
 * epf_ntb_cmd_handler() - Handle commands provided by the NTB HOST
 * @work: work_struct of the epf_ntb device
 *
 * Workqueue function that runs periodically (once every 5 ms) to check
 * whether any commands have been received from the NTB HOST. The HOST can
 * send commands to configure a doorbell, configure a memory window or
 * update the link status.
 */
static void epf_ntb_cmd_handler(struct work_struct *work)
{
	struct epf_ntb_ctrl *ctrl;
	u32 command, argument;
	struct epf_ntb *ntb;
	struct device *dev;
	int ret;
	int i;

	ntb = container_of(work, struct epf_ntb, cmd_handler.work);

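	/*
	 * Poll the doorbell backing memory: a non-zero entry means the HOST
	 * rang doorbell 'i'. Latch it into the local doorbell bitmap (bit
	 * i - 1) for vntb_epf_db_read(), notify the virtual NTB client and
	 * clear the entry again.
	 */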
	for (i = 1; i < ntb->db_count; i++) {
		if (ntb->epf_db[i]) {
			ntb->db |= 1 << (i - 1);
			ntb_db_event(&ntb->ntb, i);
			ntb->epf_db[i] = 0;
		}
	}

	ctrl = ntb->reg;
	command = ctrl->command;
	if (!command)
		goto reset_handler;
	argument = ctrl->argument;

	ctrl->command = 0;
	ctrl->argument = 0;

	dev = &ntb->epf->dev;

	switch (command) {
	case COMMAND_CONFIGURE_DOORBELL:
		ctrl->command_status = COMMAND_STATUS_OK;
		break;
	case COMMAND_TEARDOWN_DOORBELL:
		ctrl->command_status = COMMAND_STATUS_OK;
		break;
	case COMMAND_CONFIGURE_MW:
		ret = epf_ntb_configure_mw(ntb, argument);
		if (ret < 0)
			ctrl->command_status = COMMAND_STATUS_ERROR;
		else
			ctrl->command_status = COMMAND_STATUS_OK;
		break;
	case COMMAND_TEARDOWN_MW:
		epf_ntb_teardown_mw(ntb, argument);
		ctrl->command_status = COMMAND_STATUS_OK;
		break;
	case COMMAND_LINK_UP:
		ntb->linkup = true;
		ret = epf_ntb_link_up(ntb, true);
		if (ret < 0)
			ctrl->command_status = COMMAND_STATUS_ERROR;
		else
			ctrl->command_status = COMMAND_STATUS_OK;
		goto reset_handler;
	case COMMAND_LINK_DOWN:
		ntb->linkup = false;
		ret = epf_ntb_link_up(ntb, false);
		if (ret < 0)
			ctrl->command_status = COMMAND_STATUS_ERROR;
		else
			ctrl->command_status = COMMAND_STATUS_OK;
		break;
	default:
		dev_err(dev, "UNKNOWN command: %d\n", command);
		break;
	}

reset_handler:
	queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler,
			   msecs_to_jiffies(5));
}

/**
 * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Clear the BAR of the endpoint controller which contains the HOST's config
 * and self scratchpad region (removes the inbound ATU configuration). While
 * BAR0 is the default self scratchpad BAR, an NTB could have other BARs for
 * self scratchpad (because of reserved BARs). This function gets the exact
 * BAR used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
 *
 * Please note the self scratchpad region and config region are combined
 * into a single region and mapped using the same BAR. Also note the VHOST's
 * peer scratchpad is the HOST's self scratchpad.
 *
 * Returns: void
 */
static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb)
{
	struct pci_epf_bar *epf_bar;
	enum pci_barno barno;

	barno = ntb->epf_ntb_bar[BAR_CONFIG];
	epf_bar = &ntb->epf->bar[barno];

	pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
}

/**
 * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Map the BAR of the endpoint controller which contains the VHOST's config
 * and self scratchpad region.
 *
 * Please note the self scratchpad region and config region are combined
 * into a single region and mapped using the same BAR.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb)
{
	struct pci_epf_bar *epf_bar;
	enum pci_barno barno;
	u8 func_no, vfunc_no;
	struct device *dev;
	int ret;

	dev = &ntb->epf->dev;
	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;
	barno = ntb->epf_ntb_bar[BAR_CONFIG];
	epf_bar = &ntb->epf->bar[barno];

	ret = pci_epc_set_bar(ntb->epf->epc, func_no, vfunc_no, epf_bar);
	if (ret) {
		dev_err(dev, "intf: Config/Status/SPAD BAR set failed\n");
		return ret;
	}
	return 0;
}

/**
 * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
 *   config + scratchpad region
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 */
static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
{
	enum pci_barno barno;

	barno = ntb->epf_ntb_bar[BAR_CONFIG];
	pci_epf_free_space(ntb->epf, ntb->reg, barno, 0);
}

/**
 * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
 *   region
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Allocate the Local Memory mentioned in the above diagram. The size of the
 * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and the size of the
 * SCRATCHPAD REGION is obtained from the "spad_count" configfs entry.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
{
	enum pci_barno barno;
	struct epf_ntb_ctrl *ctrl;
	u32 spad_size, ctrl_size;
	struct pci_epf *epf = ntb->epf;
	struct device *dev = &epf->dev;
	u32 spad_count;
	void *base;
	int i;
	const struct pci_epc_features *epc_features = pci_epc_get_features(epf->epc,
									    epf->func_no,
									    epf->vfunc_no);
	barno = ntb->epf_ntb_bar[BAR_CONFIG];
	spad_count = ntb->spad_count;

	ctrl_size = ALIGN(sizeof(struct epf_ntb_ctrl), sizeof(u32));
	spad_size = 2 * spad_count * sizeof(u32);

	base = pci_epf_alloc_space(epf, ctrl_size + spad_size,
				   barno, epc_features, 0);
	if (!base) {
		dev_err(dev, "Config/Status/SPAD alloc region fail\n");
		return -ENOMEM;
	}

	ntb->reg = base;

	ctrl = ntb->reg;
	ctrl->spad_offset = ctrl_size;

	ctrl->spad_count = spad_count;
	ctrl->num_mws = ntb->num_mws;
	ntb->spad_size = spad_size;

	ctrl->db_entry_size = sizeof(u32);

	for (i = 0; i < ntb->db_count; i++) {
		ntb->reg->db_data[i] = 1 + i;
		ntb->reg->db_offset[i] = 0;
	}

	return 0;
}

/**
 * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Configure the MSI/MSI-X capability of the interface with the number of
 * interrupts taken from the "db_count" configfs entry.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
{
	const struct pci_epc_features *epc_features;
	struct device *dev;
	u32 db_count;
	int ret;

	dev = &ntb->epf->dev;

	epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);

	if (!(epc_features->msix_capable || epc_features->msi_capable)) {
		dev_err(dev, "MSI or MSI-X is required for doorbell\n");
		return -EINVAL;
	}

	db_count = ntb->db_count;
	if (db_count > MAX_DB_COUNT) {
		dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
		return -EINVAL;
	}

	ntb->db_count = db_count;

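	/* Only MSI is programmed here; 16 vectors are advertised to the HOST. */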
	if (epc_features->msi_capable) {
		ret = pci_epc_set_msi(ntb->epf->epc,
				      ntb->epf->func_no,
				      ntb->epf->vfunc_no,
				      16);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	return 0;
}

/**
 * epf_ntb_db_bar_init() - Configure Doorbell window BARs
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
{
	const struct pci_epc_features *epc_features;
	struct device *dev = &ntb->epf->dev;
	int ret;
	struct pci_epf_bar *epf_bar;
	void __iomem *mw_addr;
	enum pci_barno barno;
	size_t size = sizeof(u32) * ntb->db_count;

	epc_features = pci_epc_get_features(ntb->epf->epc,
					    ntb->epf->func_no,
					    ntb->epf->vfunc_no);
	barno = ntb->epf_ntb_bar[BAR_DB];

	mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, epc_features, 0);
	if (!mw_addr) {
		dev_err(dev, "Failed to allocate doorbell BAR memory\n");
		return -ENOMEM;
	}

	ntb->epf_db = mw_addr;

	epf_bar = &ntb->epf->bar[barno];

	ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
	if (ret) {
		dev_err(dev, "Doorbell BAR set failed\n");
		goto err_alloc_peer_mem;
	}
	return ret;

err_alloc_peer_mem:
	pci_epf_free_space(ntb->epf, mw_addr, barno, 0);
	return ret;
}

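/* Forward declaration: used by epf_ntb_mw_bar_init() in its error path. */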
static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws);

/**
 * epf_ntb_db_bar_clear() - Clear doorbell BAR and free the memory allocated
 *   for it
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 */
static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
{
	enum pci_barno barno;

	barno = ntb->epf_ntb_bar[BAR_DB];
	pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0);
	pci_epc_clear_bar(ntb->epf->epc,
			  ntb->epf->func_no,
			  ntb->epf->vfunc_no,
			  &ntb->epf->bar[barno]);
}

/**
 * epf_ntb_mw_bar_init() - Configure Memory window BARs
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
{
	int ret = 0;
	int i;
	u64 size;
	enum pci_barno barno;
	struct device *dev = &ntb->epf->dev;

	for (i = 0; i < ntb->num_mws; i++) {
		size = ntb->mws_size[i];
		barno = ntb->epf_ntb_bar[BAR_MW0 + i];

		ntb->epf->bar[barno].barno = barno;
		ntb->epf->bar[barno].size = size;
		ntb->epf->bar[barno].addr = NULL;
		ntb->epf->bar[barno].phys_addr = 0;
		ntb->epf->bar[barno].flags |= upper_32_bits(size) ?
				PCI_BASE_ADDRESS_MEM_TYPE_64 :
				PCI_BASE_ADDRESS_MEM_TYPE_32;

		ret = pci_epc_set_bar(ntb->epf->epc,
				      ntb->epf->func_no,
				      ntb->epf->vfunc_no,
				      &ntb->epf->bar[barno]);
		if (ret) {
			dev_err(dev, "MW set failed\n");
			goto err_alloc_mem;
		}

		/* Allocate EPC outbound memory windows to the vPCI vNTB device */
		ntb->vpci_mw_addr[i] = pci_epc_mem_alloc_addr(ntb->epf->epc,
							      &ntb->vpci_mw_phy[i],
							      size);
		if (!ntb->vpci_mw_addr[i]) {
			ret = -ENOMEM;
			dev_err(dev, "Failed to allocate source address\n");
			goto err_set_bar;
		}
	}

	return ret;

err_set_bar:
	pci_epc_clear_bar(ntb->epf->epc,
			  ntb->epf->func_no,
			  ntb->epf->vfunc_no,
			  &ntb->epf->bar[barno]);
err_alloc_mem:
	epf_ntb_mw_bar_clear(ntb, i);
	return ret;
}

/**
 * epf_ntb_mw_bar_clear() - Clear Memory window BARs
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 * @num_mws: the number of Memory window BARs that are to be cleared
 */
static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
{
	enum pci_barno barno;
	int i;

	for (i = 0; i < num_mws; i++) {
		barno = ntb->epf_ntb_bar[BAR_MW0 + i];
		pci_epc_clear_bar(ntb->epf->epc,
				  ntb->epf->func_no,
				  ntb->epf->vfunc_no,
				  &ntb->epf->bar[barno]);

		pci_epc_mem_free_addr(ntb->epf->epc,
				      ntb->vpci_mw_phy[i],
				      ntb->vpci_mw_addr[i],
				      ntb->mws_size[i]);
	}
}

/**
 * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Unregister the endpoint function from the endpoint controller and release
 * the reference to the EPC.
 */
static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
{
	pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0);
	pci_epc_put(ntb->epf->epc);
}

/**
 * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
 *   constructs (scratchpad region, doorbell, memory window)
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
{
	const struct pci_epc_features *epc_features;
	enum pci_barno barno;
	enum epf_ntb_bar bar;
	struct device *dev;
	u32 num_mws;
	int i;

	barno = BAR_0;
	num_mws = ntb->num_mws;
	dev = &ntb->epf->dev;
	epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);

	/* These are required BARs which are mandatory for NTB functionality */
	for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) {
		barno = pci_epc_get_next_free_bar(epc_features, barno);
		if (barno < 0) {
			dev_err(dev, "Fail to get NTB function BAR\n");
			return barno;
		}
		ntb->epf_ntb_bar[bar] = barno;
	}

	/* These are optional BARs which don't impact NTB functionality */
	for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) {
		barno = pci_epc_get_next_free_bar(epc_features, barno);
		if (barno < 0) {
			ntb->num_mws = i;
			dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
			break;
		}
		ntb->epf_ntb_bar[bar] = barno;
	}

	return 0;
}

/**
 * epf_ntb_epc_init() - Initialize NTB interface
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Wrapper to initialize a particular EPC interface and start the workqueue
 * to check for commands from the HOST. This function will write to the
 * EP controller HW for configuring it.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_epc_init(struct epf_ntb *ntb)
{
	u8 func_no, vfunc_no;
	struct pci_epc *epc;
	struct pci_epf *epf;
	struct device *dev;
	int ret;

	epf = ntb->epf;
	dev = &epf->dev;
	epc = epf->epc;
	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;

	ret = epf_ntb_config_sspad_bar_set(ntb);
	if (ret) {
		dev_err(dev, "Config/self SPAD BAR init failed\n");
		return ret;
	}

	ret = epf_ntb_configure_interrupt(ntb);
	if (ret) {
		dev_err(dev, "Interrupt configuration failed\n");
		goto err_config_interrupt;
	}

	ret = epf_ntb_db_bar_init(ntb);
	if (ret) {
		dev_err(dev, "DB BAR init failed\n");
		goto err_db_bar_init;
	}

	ret = epf_ntb_mw_bar_init(ntb);
	if (ret) {
		dev_err(dev, "MW BAR init failed\n");
		goto err_mw_bar_init;
	}

	if (vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			goto err_write_header;
		}
	}

	INIT_DELAYED_WORK(&ntb->cmd_handler, epf_ntb_cmd_handler);
	queue_work(kpcintb_workqueue, &ntb->cmd_handler.work);

	return 0;

err_write_header:
	epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
err_mw_bar_init:
	epf_ntb_db_bar_clear(ntb);
err_db_bar_init:
err_config_interrupt:
	epf_ntb_config_sspad_bar_clear(ntb);

	return ret;
}

/**
 * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
 * @ntb: NTB device that facilitates communication between HOST and VHOST
 *
 * Wrapper to cleanup all NTB interfaces.
 */
static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
{
	epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
	epf_ntb_db_bar_clear(ntb);
	epf_ntb_config_sspad_bar_clear(ntb);
}

#define EPF_NTB_R(_name)						\
static ssize_t epf_ntb_##_name##_show(struct config_item *item,	\
				      char *page)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
									\
	return sprintf(page, "%d\n", ntb->_name);			\
}

#define EPF_NTB_W(_name)						\
static ssize_t epf_ntb_##_name##_store(struct config_item *item,	\
				       const char *page, size_t len)	\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	u32 val;							\
	int ret;							\
									\
	ret = kstrtou32(page, 0, &val);					\
	if (ret)							\
		return ret;						\
									\
	ntb->_name = val;						\
									\
	return len;							\
}

#define EPF_NTB_MW_R(_name)						\
static ssize_t epf_ntb_##_name##_show(struct config_item *item,	\
				      char *page)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	struct device *dev = &ntb->epf->dev;				\
	int win_no;							\
									\
	if (sscanf(#_name, "mw%d", &win_no) != 1)			\
		return -EINVAL;						\
									\
	if (win_no <= 0 || win_no > ntb->num_mws) {			\
		dev_err(dev, "Invalid num_mws: %d value\n", ntb->num_mws); \
		return -EINVAL;						\
	}								\
									\
	return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]);	\
}

#define EPF_NTB_MW_W(_name)						\
static ssize_t epf_ntb_##_name##_store(struct config_item *item,	\
				       const char *page, size_t len)	\
{									\
	struct config_group *group = to_config_group(item);		\
	struct epf_ntb *ntb = to_epf_ntb(group);			\
	struct device *dev = &ntb->epf->dev;				\
	int win_no;							\
	u64 val;							\
	int ret;							\
									\
	ret = kstrtou64(page, 0, &val);					\
	if (ret)							\
		return ret;						\
									\
	if (sscanf(#_name, "mw%d", &win_no) != 1)			\
		return -EINVAL;						\
									\
	if (win_no <= 0 || win_no > ntb->num_mws) {			\
		dev_err(dev, "Invalid num_mws: %d value\n", ntb->num_mws); \
		return -EINVAL;						\
	}								\
									\
	ntb->mws_size[win_no - 1] = val;				\
									\
	return len;							\
}

static ssize_t epf_ntb_num_mws_store(struct config_item *item,
				     const char *page, size_t len)
{
	struct config_group *group = to_config_group(item);
	struct epf_ntb *ntb = to_epf_ntb(group);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret)
		return ret;

	if (val > MAX_MW)
		return -EINVAL;

	ntb->num_mws = val;

	return len;
}

EPF_NTB_R(spad_count)
EPF_NTB_W(spad_count)
EPF_NTB_R(db_count)
EPF_NTB_W(db_count)
EPF_NTB_R(num_mws)
EPF_NTB_R(vbus_number)
EPF_NTB_W(vbus_number)
EPF_NTB_R(vntb_pid)
EPF_NTB_W(vntb_pid)
EPF_NTB_R(vntb_vid)
EPF_NTB_W(vntb_vid)
EPF_NTB_MW_R(mw1)
EPF_NTB_MW_W(mw1)
EPF_NTB_MW_R(mw2)
EPF_NTB_MW_W(mw2)
EPF_NTB_MW_R(mw3)
EPF_NTB_MW_W(mw3)
EPF_NTB_MW_R(mw4)
EPF_NTB_MW_W(mw4)

CONFIGFS_ATTR(epf_ntb_, spad_count);
CONFIGFS_ATTR(epf_ntb_, db_count);
CONFIGFS_ATTR(epf_ntb_, num_mws);
CONFIGFS_ATTR(epf_ntb_, mw1);
CONFIGFS_ATTR(epf_ntb_, mw2);
CONFIGFS_ATTR(epf_ntb_, mw3);
CONFIGFS_ATTR(epf_ntb_, mw4);
CONFIGFS_ATTR(epf_ntb_, vbus_number);
CONFIGFS_ATTR(epf_ntb_, vntb_pid);
CONFIGFS_ATTR(epf_ntb_, vntb_vid);

static struct configfs_attribute *epf_ntb_attrs[] = {
	&epf_ntb_attr_spad_count,
	&epf_ntb_attr_db_count,
	&epf_ntb_attr_num_mws,
	&epf_ntb_attr_mw1,
	&epf_ntb_attr_mw2,
	&epf_ntb_attr_mw3,
	&epf_ntb_attr_mw4,
	&epf_ntb_attr_vbus_number,
	&epf_ntb_attr_vntb_pid,
	&epf_ntb_attr_vntb_vid,
	NULL,
};

static const struct config_item_type ntb_group_type = {
	.ct_attrs	= epf_ntb_attrs,
	.ct_owner	= THIS_MODULE,
};
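
/*
 * The attributes above show up in the NTB sub-directory of the function's
 * configfs entry. A typical (hypothetical) setup sequence from userspace,
 * assuming the function device was created as "func1", might look like:
 *
 *   echo 64       > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
 *   echo 4        > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
 *   echo 1        > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
 *   echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
 *
 * The exact directory layout depends on how the EPF device was created; see
 * the PCI vNTB how-to in Documentation/PCI/endpoint/ for the complete flow.
 */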

/**
 * epf_ntb_add_cfs() - Add configfs directory specific to NTB
 * @epf: NTB endpoint function device
 * @group: A pointer to the config_group structure referencing a group of
 *	   config_items of a specific type that belong to a specific sub-system.
 *
 * Add configfs directory specific to NTB. This directory will hold
 * NTB specific properties like db_count, spad_count, num_mws, etc.
 *
 * Returns: Pointer to config_group
 */
static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
					    struct config_group *group)
{
	struct epf_ntb *ntb = epf_get_drvdata(epf);
	struct config_group *ntb_group = &ntb->group;
	struct device *dev = &epf->dev;

	config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);

	return ntb_group;
}

/*==== virtual PCI bus driver, which only loads the virtual NTB PCI driver ====*/

static u32 pci_space[] = {
	0xffffffff,	/* Device ID, Vendor ID */
	0,		/* Status, Command */
	0xffffffff,	/* Base Class, Subclass, Prog Intf, Revision ID */
	0x40,		/* BIST, Header Type, Latency Timer, Cache Line Size */
	0,		/* BAR 0 */
	0,		/* BAR 1 */
	0,		/* BAR 2 */
	0,		/* BAR 3 */
	0,		/* BAR 4 */
	0,		/* BAR 5 */
	0,		/* Cardbus CIS Pointer */
	0,		/* Subsystem ID, Subsystem Vendor ID */
	0,		/* ROM Base Address */
	0,		/* Reserved, Capabilities Pointer */
	0,		/* Reserved */
	0,		/* Max_Lat, Min_Gnt, Interrupt Pin, Interrupt Line */
};
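
/*
 * Minimal config-space emulation for the virtual bus: only device 0 exists
 * and reads are served directly from pci_space[] above. The Device/Vendor ID
 * word is patched in epf_ntb_bind() with the configfs-provided
 * vntb_vid/vntb_pid before the bus is scanned. Writes are ignored.
 */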

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
{
	if (devfn == 0) {
		memcpy(val, ((u8 *)pci_space) + where, size);
		return PCIBIOS_SUCCESSFUL;
	}
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
{
	return 0;
}

static struct pci_ops vpci_ops = {
	.read = pci_read,
	.write = pci_write,
};

static int vpci_scan_bus(void *sysdata)
{
	struct pci_bus *vpci_bus;
	struct epf_ntb *ndev = sysdata;

	vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
	if (!vpci_bus) {
		pr_err("failed to create virtual PCI bus\n");
		return -EINVAL;
	}

	pci_bus_add_devices(vpci_bus);

	return 0;
}

/*==================== Virtual PCIe NTB driver ==========================*/

static int vntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
{
	struct epf_ntb *ndev = ntb_ndev(ntb);

	return ndev->num_mws;
}

static int vntb_epf_spad_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->spad_count;
}

static int vntb_epf_peer_mw_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->num_mws;
}

static u64 vntb_epf_db_valid_mask(struct ntb_dev *ntb)
{
	return BIT_ULL(ntb_ndev(ntb)->db_count) - 1;
}

static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	return 0;
}

static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
				 dma_addr_t addr, resource_size_t size)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct pci_epf_bar *epf_bar;
	enum pci_barno barno;
	int ret;
	struct device *dev;

	dev = &ntb->ntb.dev;
	barno = ntb->epf_ntb_bar[BAR_MW0 + idx];
	epf_bar = &ntb->epf->bar[barno];
	epf_bar->phys_addr = addr;
	epf_bar->barno = barno;
	epf_bar->size = size;

	ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no,
			      ntb->epf->vfunc_no, epf_bar);
	if (ret) {
		dev_err(dev, "failed to set memory window translation\n");
		return ret;
	}
	return 0;
}

static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
{
	return 0;
}

static int vntb_epf_peer_mw_get_addr(struct ntb_dev *ndev, int idx,
				     phys_addr_t *base, resource_size_t *size)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	if (base)
		*base = ntb->vpci_mw_phy[idx];

	if (size)
		*size = ntb->mws_size[idx];

	return 0;
}

static int vntb_epf_link_enable(struct ntb_dev *ntb,
				enum ntb_speed max_speed,
				enum ntb_width max_width)
{
	return 0;
}

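/*
 * Scratchpad addressing for the accessors below (see the layout diagram near
 * the top of this file): the region at spad_offset holds what the HOST sees
 * as its self scratchpads, i.e. the VHOST's peer scratchpads; the following
 * spad_count * 4 bytes hold the VHOST's self scratchpads.
 */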
static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * sizeof(u32);
	u32 val;
	void __iomem *base = (void __iomem *)ntb->reg;

	val = readl(base + off + ct + idx * sizeof(u32));
	return val;
}

static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct epf_ntb_ctrl *ctrl = ntb->reg;
	int off = ctrl->spad_offset, ct = ctrl->spad_count * sizeof(u32);
	void __iomem *base = (void __iomem *)ntb->reg;

	writel(val, base + off + ct + idx * sizeof(u32));
	return 0;
}

static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct epf_ntb_ctrl *ctrl = ntb->reg;
	int off = ctrl->spad_offset;
	void __iomem *base = (void __iomem *)ntb->reg;
	u32 val;

	val = readl(base + off + idx * sizeof(u32));
	return val;
}

static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 val)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);
	struct epf_ntb_ctrl *ctrl = ntb->reg;
	int off = ctrl->spad_offset;
	void __iomem *base = (void __iomem *)ntb->reg;

	writel(val, base + off + idx * sizeof(u32));
	return 0;
}

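/*
 * Ring a doorbell on the HOST by raising an MSI on the physical endpoint
 * function; the vector number is derived from the lowest set bit in db_bits.
 */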
static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
{
	u32 interrupt_num = ffs(db_bits) + 1;
	struct epf_ntb *ntb = ntb_ndev(ndev);
	u8 func_no, vfunc_no;
	int ret;

	func_no = ntb->epf->func_no;
	vfunc_no = ntb->epf->vfunc_no;

	ret = pci_epc_raise_irq(ntb->epf->epc, func_no, vfunc_no,
				PCI_IRQ_MSI, interrupt_num + 1);
	if (ret)
		dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");

	return ret;
}

static u64 vntb_epf_db_read(struct ntb_dev *ndev)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	return ntb->db;
}

static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx,
				 resource_size_t *addr_align,
				 resource_size_t *size_align,
				 resource_size_t *size_max)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	if (addr_align)
		*addr_align = SZ_4K;

	if (size_align)
		*size_align = 1;

	if (size_max)
		*size_max = ntb->mws_size[idx];

	return 0;
}

static u64 vntb_epf_link_is_up(struct ntb_dev *ndev,
			       enum ntb_speed *speed,
			       enum ntb_width *width)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	return ntb->reg->link_status;
}

static int vntb_epf_db_clear_mask(struct ntb_dev *ndev, u64 db_bits)
{
	return 0;
}

static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits)
{
	struct epf_ntb *ntb = ntb_ndev(ndev);

	ntb->db &= ~db_bits;
	return 0;
}

static int vntb_epf_link_disable(struct ntb_dev *ntb)
{
	return 0;
}

static const struct ntb_dev_ops vntb_epf_ops = {
	.mw_count		= vntb_epf_mw_count,
	.spad_count		= vntb_epf_spad_count,
	.peer_mw_count		= vntb_epf_peer_mw_count,
	.db_valid_mask		= vntb_epf_db_valid_mask,
	.db_set_mask		= vntb_epf_db_set_mask,
	.mw_set_trans		= vntb_epf_mw_set_trans,
	.mw_clear_trans		= vntb_epf_mw_clear_trans,
	.peer_mw_get_addr	= vntb_epf_peer_mw_get_addr,
	.link_enable		= vntb_epf_link_enable,
	.spad_read		= vntb_epf_spad_read,
	.spad_write		= vntb_epf_spad_write,
	.peer_spad_read		= vntb_epf_peer_spad_read,
	.peer_spad_write	= vntb_epf_peer_spad_write,
	.peer_db_set		= vntb_epf_peer_db_set,
	.db_read		= vntb_epf_db_read,
	.mw_get_align		= vntb_epf_mw_get_align,
	.link_is_up		= vntb_epf_link_is_up,
	.db_clear_mask		= vntb_epf_db_clear_mask,
	.db_clear		= vntb_epf_db_clear,
	.link_disable		= vntb_epf_link_disable,
};

static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct epf_ntb *ndev = (struct epf_ntb *)pdev->sysdata;
	struct device *dev = &pdev->dev;

	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &vntb_epf_ops;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "Cannot set DMA mask\n");
		return ret;
	}

	ret = ntb_register_device(&ndev->ntb);
	if (ret) {
		dev_err(dev, "Failed to register NTB device\n");
		return ret;
	}

	dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
	return 0;
}

static struct pci_device_id pci_vntb_table[] = {
	{
		PCI_DEVICE(0xffff, 0xffff),
	},
	{},
};
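
/*
 * The 0xffff/0xffff IDs above are placeholders: epf_ntb_bind() rewrites both
 * this match table and the emulated config space with the configfs-provided
 * vntb_vid/vntb_pid before the driver is registered.
 */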

static struct pci_driver vntb_pci_driver = {
	.name		= "pci-vntb",
	.id_table	= pci_vntb_table,
	.probe		= pci_vntb_probe,
};

/* ============ PCIe EPF Driver Bind ==================== */

/**
 * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
 * @epf: NTB endpoint function device
 *
 * Initialize the endpoint controller associated with the NTB function device
 * and bring up the virtual PCI bus that hosts the virtual NTB device.
 * Invoked when the endpoint function is bound to an EPC device.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_bind(struct pci_epf *epf)
{
	struct epf_ntb *ntb = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	int ret;

	if (!epf->epc) {
		dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
		return 0;
	}

	ret = epf_ntb_init_epc_bar(ntb);
	if (ret) {
		dev_err(dev, "Failed to create NTB EPC\n");
		goto err_bar_init;
	}

	ret = epf_ntb_config_spad_bar_alloc(ntb);
	if (ret) {
		dev_err(dev, "Failed to allocate BAR memory\n");
		goto err_bar_alloc;
	}

	ret = epf_ntb_epc_init(ntb);
	if (ret) {
		dev_err(dev, "Failed to initialize EPC\n");
		goto err_bar_alloc;
	}

	epf_set_drvdata(epf, ntb);

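	/*
	 * Plug the configfs-provided IDs into the emulated config space and
	 * into the vNTB PCI driver's match table, then register the driver
	 * and scan the virtual bus that the vNTB device lives on.
	 */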
	pci_space[0] = (ntb->vntb_pid << 16) | ntb->vntb_vid;
	pci_vntb_table[0].vendor = ntb->vntb_vid;
	pci_vntb_table[0].device = ntb->vntb_pid;

	ret = pci_register_driver(&vntb_pci_driver);
	if (ret) {
		dev_err(dev, "failed to register vntb pci driver\n");
		goto err_epc_cleanup;
	}

	ret = vpci_scan_bus(ntb);
	if (ret)
		goto err_unregister;

	return 0;

err_unregister:
	pci_unregister_driver(&vntb_pci_driver);
err_epc_cleanup:
	epf_ntb_epc_cleanup(ntb);
err_bar_alloc:
	epf_ntb_config_spad_bar_free(ntb);

err_bar_init:
	epf_ntb_epc_destroy(ntb);

	return ret;
}

/**
 * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
 * @epf: NTB endpoint function device
 *
 * Cleanup the initialization from epf_ntb_bind()
 */
static void epf_ntb_unbind(struct pci_epf *epf)
{
	struct epf_ntb *ntb = epf_get_drvdata(epf);

	epf_ntb_epc_cleanup(ntb);
	epf_ntb_config_spad_bar_free(ntb);
	epf_ntb_epc_destroy(ntb);

	pci_unregister_driver(&vntb_pci_driver);
}

static const struct pci_epf_ops epf_ntb_ops = {
	.bind	= epf_ntb_bind,
	.unbind	= epf_ntb_unbind,
	.add_cfs = epf_ntb_add_cfs,
};

/**
 * epf_ntb_probe() - Probe NTB function driver
 * @epf: NTB endpoint function device
 * @id: NTB endpoint function device ID
 *
 * Probe NTB function driver when the endpoint function bus detects an NTB
 * endpoint function.
 *
 * Returns: Zero for success, or an error code in case of failure
 */
static int epf_ntb_probe(struct pci_epf *epf,
			 const struct pci_epf_device_id *id)
{
	struct epf_ntb *ntb;
	struct device *dev;

	dev = &epf->dev;

	ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
	if (!ntb)
		return -ENOMEM;

	epf->header = &epf_ntb_header;
	ntb->epf = epf;
	ntb->vbus_number = 0xff;
	epf_set_drvdata(epf, ntb);

	dev_info(dev, "pci-ep epf driver loaded\n");
	return 0;
}

static const struct pci_epf_device_id epf_ntb_ids[] = {
	{
		.name = "pci_epf_vntb",
	},
	{},
};

static struct pci_epf_driver epf_ntb_driver = {
	.driver.name	= "pci_epf_vntb",
	.probe		= epf_ntb_probe,
	.id_table	= epf_ntb_ids,
	.ops		= &epf_ntb_ops,
	.owner		= THIS_MODULE,
};

static int __init epf_ntb_init(void)
{
	int ret;

	kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
					    WQ_HIGHPRI, 0);
	if (!kpcintb_workqueue) {
		pr_err("Failed to allocate the kpcintb workqueue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&epf_ntb_driver);
	if (ret) {
		destroy_workqueue(kpcintb_workqueue);
		pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(epf_ntb_init);

static void __exit epf_ntb_exit(void)
{
	pci_epf_unregister_driver(&epf_ntb_driver);
	destroy_workqueue(kpcintb_workqueue);
}
module_exit(epf_ntb_exit);

MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
MODULE_AUTHOR("Frank Li <Frank.li@nxp.com>");
MODULE_LICENSE("GPL v2");