xref: /linux/drivers/pci/endpoint/pci-epc-core.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PCI Endpoint *Controller* (EPC) library
4  *
5  * Copyright (C) 2017 Texas Instruments
6  * Author: Kishon Vijay Abraham I <kishon@ti.com>
7  */
8 
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 
13 #include <linux/pci-epc.h>
14 #include <linux/pci-epf.h>
15 #include <linux/pci-ep-cfs.h>
16 
/* sysfs device class ("pci_epc") under which every EPC device is registered. */
static const struct class pci_epc_class = {
	.name = "pci_epc",
};
20 
/*
 * devres release callback: the devres data holds a pointer to the EPC that
 * must be destroyed when the owning device is detached.
 */
static void devm_pci_epc_release(struct device *dev, void *res)
{
	struct pci_epc **epc_ptr = res;

	pci_epc_destroy(*epc_ptr);
}
27 
/*
 * devres match callback: true when this devres entry wraps @match_data,
 * i.e. the EPC the caller wants to drop.
 */
static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
{
	return *(struct pci_epc **)res == match_data;
}
34 
/**
 * pci_epc_put() - release the PCI endpoint controller
 * @epc: epc returned by pci_epc_get()
 *
 * release the refcount the caller obtained by invoking pci_epc_get().
 * Drops both references taken there: the module reference on the
 * controller driver and the device reference on the EPC device.
 * NULL and ERR_PTR() values are silently ignored.
 */
void pci_epc_put(struct pci_epc *epc)
{
	if (IS_ERR_OR_NULL(epc))
		return;

	module_put(epc->ops->owner);
	put_device(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_put);
50 
/**
 * pci_epc_get() - get the PCI endpoint controller
 * @epc_name: device name of the endpoint controller
 *
 * Invoke to get struct pci_epc * corresponding to the device name of the
 * endpoint controller.
 *
 * On success a reference is held on both the controller driver's module and
 * the EPC device; release both with pci_epc_put(). Returns an ERR_PTR() if
 * no matching device is found or its module cannot be pinned.
 */
struct pci_epc *pci_epc_get(const char *epc_name)
{
	int ret = -EINVAL;
	struct pci_epc *epc;
	struct device *dev;
	struct class_dev_iter iter;

	/* Walk every device registered under the pci_epc class. */
	class_dev_iter_init(&iter, &pci_epc_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		if (strcmp(epc_name, dev_name(dev)))
			continue;

		epc = to_pci_epc(dev);
		/* Pin the controller driver so its ops cannot be unloaded. */
		if (!try_module_get(epc->ops->owner)) {
			ret = -EINVAL;
			goto err;
		}

		class_dev_iter_exit(&iter);
		get_device(&epc->dev);
		return epc;
	}

err:
	class_dev_iter_exit(&iter);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
86 
/**
 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 *
 * Invoke to get the first unreserved BAR that can be used by the endpoint
 * function.
 */
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
	/* Same as searching for a free BAR starting from BAR_0. */
	return pci_epc_get_next_free_bar(epc_features, BAR_0);
}
EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
100 
101 /**
102  * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
103  * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
104  * @bar: the starting BAR number from where unreserved BAR should be searched
105  *
106  * Invoke to get the next unreserved BAR starting from @bar that can be used
107  * for endpoint function.
108  */
109 enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
110 					 *epc_features, enum pci_barno bar)
111 {
112 	int i;
113 
114 	if (!epc_features)
115 		return BAR_0;
116 
117 	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
118 	if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
119 		bar++;
120 
121 	for (i = bar; i < PCI_STD_NUM_BARS; i++) {
122 		/* If the BAR is not reserved, return it. */
123 		if (epc_features->bar[i].type != BAR_RESERVED)
124 			return i;
125 	}
126 
127 	return NO_BAR;
128 }
129 EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
130 
131 static bool pci_epc_function_is_valid(struct pci_epc *epc,
132 				      u8 func_no, u8 vfunc_no)
133 {
134 	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
135 		return false;
136 
137 	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
138 		return false;
139 
140 	return true;
141 }
142 
143 /**
144  * pci_epc_get_features() - get the features supported by EPC
145  * @epc: the features supported by *this* EPC device will be returned
146  * @func_no: the features supported by the EPC device specific to the
147  *	     endpoint function with func_no will be returned
148  * @vfunc_no: the features supported by the EPC device specific to the
149  *	     virtual endpoint function with vfunc_no will be returned
150  *
151  * Invoke to get the features provided by the EPC which may be
152  * specific to an endpoint function. Returns pci_epc_features on success
153  * and NULL for any failures.
154  */
155 const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
156 						    u8 func_no, u8 vfunc_no)
157 {
158 	const struct pci_epc_features *epc_features;
159 
160 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
161 		return NULL;
162 
163 	if (!epc->ops->get_features)
164 		return NULL;
165 
166 	mutex_lock(&epc->lock);
167 	epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
168 	mutex_unlock(&epc->lock);
169 
170 	return epc_features;
171 }
172 EXPORT_SYMBOL_GPL(pci_epc_get_features);
173 
174 /**
175  * pci_epc_stop() - stop the PCI link
176  * @epc: the link of the EPC device that has to be stopped
177  *
178  * Invoke to stop the PCI link
179  */
180 void pci_epc_stop(struct pci_epc *epc)
181 {
182 	if (IS_ERR(epc) || !epc->ops->stop)
183 		return;
184 
185 	mutex_lock(&epc->lock);
186 	epc->ops->stop(epc);
187 	mutex_unlock(&epc->lock);
188 }
189 EXPORT_SYMBOL_GPL(pci_epc_stop);
190 
191 /**
192  * pci_epc_start() - start the PCI link
193  * @epc: the link of *this* EPC device has to be started
194  *
195  * Invoke to start the PCI link
196  */
197 int pci_epc_start(struct pci_epc *epc)
198 {
199 	int ret;
200 
201 	if (IS_ERR(epc))
202 		return -EINVAL;
203 
204 	if (!epc->ops->start)
205 		return 0;
206 
207 	mutex_lock(&epc->lock);
208 	ret = epc->ops->start(epc);
209 	mutex_unlock(&epc->lock);
210 
211 	return ret;
212 }
213 EXPORT_SYMBOL_GPL(pci_epc_start);
214 
215 /**
216  * pci_epc_raise_irq() - interrupt the host system
217  * @epc: the EPC device which has to interrupt the host
218  * @func_no: the physical endpoint function number in the EPC device
219  * @vfunc_no: the virtual endpoint function number in the physical function
220  * @type: specify the type of interrupt; INTX, MSI or MSI-X
221  * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
222  *
223  * Invoke to raise an INTX, MSI or MSI-X interrupt
224  */
225 int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
226 		      unsigned int type, u16 interrupt_num)
227 {
228 	int ret;
229 
230 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
231 		return -EINVAL;
232 
233 	if (!epc->ops->raise_irq)
234 		return 0;
235 
236 	mutex_lock(&epc->lock);
237 	ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
238 	mutex_unlock(&epc->lock);
239 
240 	return ret;
241 }
242 EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
243 
244 /**
245  * pci_epc_map_msi_irq() - Map physical address to MSI address and return
246  *                         MSI data
247  * @epc: the EPC device which has the MSI capability
248  * @func_no: the physical endpoint function number in the EPC device
249  * @vfunc_no: the virtual endpoint function number in the physical function
250  * @phys_addr: the physical address of the outbound region
251  * @interrupt_num: the MSI interrupt number with range (1-N)
252  * @entry_size: Size of Outbound address region for each interrupt
253  * @msi_data: the data that should be written in order to raise MSI interrupt
254  *            with interrupt number as 'interrupt num'
255  * @msi_addr_offset: Offset of MSI address from the aligned outbound address
256  *                   to which the MSI address is mapped
257  *
258  * Invoke to map physical address to MSI address and return MSI data. The
259  * physical address should be an address in the outbound region. This is
260  * required to implement doorbell functionality of NTB wherein EPC on either
261  * side of the interface (primary and secondary) can directly write to the
262  * physical address (in outbound region) of the other interface to ring
263  * doorbell.
264  */
265 int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
266 			phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
267 			u32 *msi_data, u32 *msi_addr_offset)
268 {
269 	int ret;
270 
271 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
272 		return -EINVAL;
273 
274 	if (!epc->ops->map_msi_irq)
275 		return -EINVAL;
276 
277 	mutex_lock(&epc->lock);
278 	ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
279 				    interrupt_num, entry_size, msi_data,
280 				    msi_addr_offset);
281 	mutex_unlock(&epc->lock);
282 
283 	return ret;
284 }
285 EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
286 
287 /**
288  * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
289  * @epc: the EPC device to which MSI interrupts was requested
290  * @func_no: the physical endpoint function number in the EPC device
291  * @vfunc_no: the virtual endpoint function number in the physical function
292  *
293  * Invoke to get the number of MSI interrupts allocated by the RC
294  */
295 int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
296 {
297 	int interrupt;
298 
299 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
300 		return 0;
301 
302 	if (!epc->ops->get_msi)
303 		return 0;
304 
305 	mutex_lock(&epc->lock);
306 	interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
307 	mutex_unlock(&epc->lock);
308 
309 	if (interrupt < 0)
310 		return 0;
311 
312 	interrupt = 1 << interrupt;
313 
314 	return interrupt;
315 }
316 EXPORT_SYMBOL_GPL(pci_epc_get_msi);
317 
318 /**
319  * pci_epc_set_msi() - set the number of MSI interrupt numbers required
320  * @epc: the EPC device on which MSI has to be configured
321  * @func_no: the physical endpoint function number in the EPC device
322  * @vfunc_no: the virtual endpoint function number in the physical function
323  * @interrupts: number of MSI interrupts required by the EPF
324  *
325  * Invoke to set the required number of MSI interrupts.
326  */
327 int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
328 {
329 	int ret;
330 	u8 encode_int;
331 
332 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
333 		return -EINVAL;
334 
335 	if (interrupts < 1 || interrupts > 32)
336 		return -EINVAL;
337 
338 	if (!epc->ops->set_msi)
339 		return 0;
340 
341 	encode_int = order_base_2(interrupts);
342 
343 	mutex_lock(&epc->lock);
344 	ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
345 	mutex_unlock(&epc->lock);
346 
347 	return ret;
348 }
349 EXPORT_SYMBOL_GPL(pci_epc_set_msi);
350 
351 /**
352  * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
353  * @epc: the EPC device to which MSI-X interrupts was requested
354  * @func_no: the physical endpoint function number in the EPC device
355  * @vfunc_no: the virtual endpoint function number in the physical function
356  *
357  * Invoke to get the number of MSI-X interrupts allocated by the RC
358  */
359 int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
360 {
361 	int interrupt;
362 
363 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
364 		return 0;
365 
366 	if (!epc->ops->get_msix)
367 		return 0;
368 
369 	mutex_lock(&epc->lock);
370 	interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
371 	mutex_unlock(&epc->lock);
372 
373 	if (interrupt < 0)
374 		return 0;
375 
376 	return interrupt + 1;
377 }
378 EXPORT_SYMBOL_GPL(pci_epc_get_msix);
379 
380 /**
381  * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
382  * @epc: the EPC device on which MSI-X has to be configured
383  * @func_no: the physical endpoint function number in the EPC device
384  * @vfunc_no: the virtual endpoint function number in the physical function
385  * @interrupts: number of MSI-X interrupts required by the EPF
386  * @bir: BAR where the MSI-X table resides
387  * @offset: Offset pointing to the start of MSI-X table
388  *
389  * Invoke to set the required number of MSI-X interrupts.
390  */
391 int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
392 		     u16 interrupts, enum pci_barno bir, u32 offset)
393 {
394 	int ret;
395 
396 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
397 		return -EINVAL;
398 
399 	if (interrupts < 1 || interrupts > 2048)
400 		return -EINVAL;
401 
402 	if (!epc->ops->set_msix)
403 		return 0;
404 
405 	mutex_lock(&epc->lock);
406 	ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
407 				 offset);
408 	mutex_unlock(&epc->lock);
409 
410 	return ret;
411 }
412 EXPORT_SYMBOL_GPL(pci_epc_set_msix);
413 
414 /**
415  * pci_epc_unmap_addr() - unmap CPU address from PCI address
416  * @epc: the EPC device on which address is allocated
417  * @func_no: the physical endpoint function number in the EPC device
418  * @vfunc_no: the virtual endpoint function number in the physical function
419  * @phys_addr: physical address of the local system
420  *
421  * Invoke to unmap the CPU address from PCI address.
422  */
423 void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
424 			phys_addr_t phys_addr)
425 {
426 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
427 		return;
428 
429 	if (!epc->ops->unmap_addr)
430 		return;
431 
432 	mutex_lock(&epc->lock);
433 	epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
434 	mutex_unlock(&epc->lock);
435 }
436 EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
437 
438 /**
439  * pci_epc_map_addr() - map CPU address to PCI address
440  * @epc: the EPC device on which address is allocated
441  * @func_no: the physical endpoint function number in the EPC device
442  * @vfunc_no: the virtual endpoint function number in the physical function
443  * @phys_addr: physical address of the local system
444  * @pci_addr: PCI address to which the physical address should be mapped
445  * @size: the size of the allocation
446  *
447  * Invoke to map CPU address with PCI address.
448  */
449 int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
450 		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
451 {
452 	int ret;
453 
454 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
455 		return -EINVAL;
456 
457 	if (!epc->ops->map_addr)
458 		return 0;
459 
460 	mutex_lock(&epc->lock);
461 	ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
462 				 size);
463 	mutex_unlock(&epc->lock);
464 
465 	return ret;
466 }
467 EXPORT_SYMBOL_GPL(pci_epc_map_addr);
468 
/**
 * pci_epc_mem_map() - allocate and map a PCI address to a CPU address
 * @epc: the EPC device on which the CPU address is to be allocated and mapped
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @pci_addr: PCI address to which the CPU address should be mapped
 * @pci_size: the number of bytes to map starting from @pci_addr
 * @map: where to return the mapping information
 *
 * Allocate a controller memory address region and map it to a RC PCI address
 * region, taking into account the controller physical address mapping
 * constraints using the controller operation align_addr(). If this operation is
 * not defined, we assume that there are no alignment constraints for the
 * mapping.
 *
 * The effective size of the PCI address range mapped from @pci_addr is
 * indicated by @map->pci_size. This size may be less than the requested
 * @pci_size. The local virtual CPU address for the mapping is indicated by
 * @map->virt_addr (@map->phys_addr indicates the physical address).
 * The size and CPU address of the controller memory allocated and mapped are
 * respectively indicated by @map->map_size and @map->virt_base (and
 * @map->phys_base for the physical address of @map->virt_base).
 *
 * Returns 0 on success and a negative error code in case of error.
 */
int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    u64 pci_addr, size_t pci_size, struct pci_epc_map *map)
{
	size_t map_size = pci_size;
	size_t map_offset = 0;
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (!pci_size || !map)
		return -EINVAL;

	/*
	 * Align the PCI address to map. If the controller defines the
	 * .align_addr() operation, use it to determine the PCI address to map
	 * and the size of the mapping. Otherwise, assume that the controller
	 * has no alignment constraint.
	 */
	memset(map, 0, sizeof(*map));
	map->pci_addr = pci_addr;
	if (epc->ops->align_addr)
		map->map_pci_addr =
			epc->ops->align_addr(epc, pci_addr,
					     &map_size, &map_offset);
	else
		map->map_pci_addr = pci_addr;
	map->map_size = map_size;
	/*
	 * If the aligned region ends before the requested range does, report
	 * the effective (truncated) number of bytes covered from @pci_addr;
	 * otherwise the whole request is covered.
	 */
	if (map->map_pci_addr + map->map_size < pci_addr + pci_size)
		map->pci_size = map->map_pci_addr + map->map_size - pci_addr;
	else
		map->pci_size = pci_size;

	map->virt_base = pci_epc_mem_alloc_addr(epc, &map->phys_base,
						map->map_size);
	if (!map->virt_base)
		return -ENOMEM;

	/* @pci_addr sits @map_offset bytes into the aligned mapping. */
	map->phys_addr = map->phys_base + map_offset;
	map->virt_addr = map->virt_base + map_offset;

	ret = pci_epc_map_addr(epc, func_no, vfunc_no, map->phys_base,
			       map->map_pci_addr, map->map_size);
	if (ret) {
		/* Mapping failed: give the controller memory back. */
		pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
				      map->map_size);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_map);
546 
547 /**
548  * pci_epc_mem_unmap() - unmap and free a CPU address region
549  * @epc: the EPC device on which the CPU address is allocated and mapped
550  * @func_no: the physical endpoint function number in the EPC device
551  * @vfunc_no: the virtual endpoint function number in the physical function
552  * @map: the mapping information
553  *
554  * Unmap and free a CPU address region that was allocated and mapped with
555  * pci_epc_mem_map().
556  */
557 void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
558 		       struct pci_epc_map *map)
559 {
560 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
561 		return;
562 
563 	if (!map || !map->virt_base)
564 		return;
565 
566 	pci_epc_unmap_addr(epc, func_no, vfunc_no, map->phys_base);
567 	pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
568 			      map->map_size);
569 }
570 EXPORT_SYMBOL_GPL(pci_epc_mem_unmap);
571 
572 /**
573  * pci_epc_clear_bar() - reset the BAR
574  * @epc: the EPC device for which the BAR has to be cleared
575  * @func_no: the physical endpoint function number in the EPC device
576  * @vfunc_no: the virtual endpoint function number in the physical function
577  * @epf_bar: the struct epf_bar that contains the BAR information
578  *
579  * Invoke to reset the BAR of the endpoint device.
580  */
581 void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
582 		       struct pci_epf_bar *epf_bar)
583 {
584 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
585 		return;
586 
587 	if (epf_bar->barno == BAR_5 &&
588 	    epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
589 		return;
590 
591 	if (!epc->ops->clear_bar)
592 		return;
593 
594 	mutex_lock(&epc->lock);
595 	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
596 	mutex_unlock(&epc->lock);
597 }
598 EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
599 
/**
 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
 * @epc: the EPC device on which BAR has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to configure the BAR of the endpoint device.
 */
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    struct pci_epf_bar *epf_bar)
{
	int ret;
	int flags = epf_bar->flags;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	/*
	 * Reject impossible BAR descriptions:
	 * - a 64-bit BAR starting at BAR_5 (no upper half available),
	 * - an IO BAR carrying bits from the address portion of the flags,
	 * - a BAR larger than 4 GiB without the 64-bit flag.
	 */
	if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
	    (upper_32_bits(epf_bar->size) &&
	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
		return -EINVAL;

	if (!epc->ops->set_bar)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
635 
636 /**
637  * pci_epc_write_header() - write standard configuration header
638  * @epc: the EPC device to which the configuration header should be written
639  * @func_no: the physical endpoint function number in the EPC device
640  * @vfunc_no: the virtual endpoint function number in the physical function
641  * @header: standard configuration header fields
642  *
643  * Invoke to write the configuration header to the endpoint controller. Every
644  * endpoint controller will have a dedicated location to which the standard
645  * configuration header would be written. The callback function should write
646  * the header fields to this dedicated location.
647  */
648 int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
649 			 struct pci_epf_header *header)
650 {
651 	int ret;
652 
653 	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
654 		return -EINVAL;
655 
656 	/* Only Virtual Function #1 has deviceID */
657 	if (vfunc_no > 1)
658 		return -EINVAL;
659 
660 	if (!epc->ops->write_header)
661 		return 0;
662 
663 	mutex_lock(&epc->lock);
664 	ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
665 	mutex_unlock(&epc->lock);
666 
667 	return ret;
668 }
669 EXPORT_SYMBOL_GPL(pci_epc_write_header);
670 
/**
 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
 * @epc: the EPC device to which the endpoint function should be added
 * @epf: the endpoint function to be added
 * @type: Identifies if the EPC is connected to the primary or secondary
 *        interface of EPF
 *
 * A PCI endpoint device can have one or more functions. In the case of PCIe,
 * the specification allows up to 8 PCIe endpoint functions. Invoke
 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
 */
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
		    enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no;
	int ret = 0;

	/* Virtual functions are managed per-EPF, not bound here. */
	if (IS_ERR_OR_NULL(epc) || epf->is_vf)
		return -EINVAL;

	/* An EPF can be bound to at most one EPC per interface. */
	if (type == PRIMARY_INTERFACE && epf->epc)
		return -EBUSY;

	if (type == SECONDARY_INTERFACE && epf->sec_epc)
		return -EBUSY;

	/* list_lock protects both function_num_map and the pci_epf list. */
	mutex_lock(&epc->list_lock);
	/* The bitmap is a single unsigned long, hence the BITS_PER_LONG cap. */
	func_no = find_first_zero_bit(&epc->function_num_map,
				      BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG) {
		ret = -EINVAL;
		goto ret;
	}

	if (func_no > epc->max_functions - 1) {
		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
		ret = -EINVAL;
		goto ret;
	}

	/* Reserve the function number, then publish it on the EPF. */
	set_bit(func_no, &epc->function_num_map);
	if (type == PRIMARY_INTERFACE) {
		epf->func_no = func_no;
		epf->epc = epc;
		list = &epf->list;
	} else {
		epf->sec_epc_func_no = func_no;
		epf->sec_epc = epc;
		list = &epf->sec_epc_list;
	}

	list_add_tail(list, &epc->pci_epf);
ret:
	mutex_unlock(&epc->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);
730 
/**
 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
 * @epc: the EPC device from which the endpoint function should be removed
 * @epf: the endpoint function to be removed
 * @type: identifies if the EPC is connected to the primary or secondary
 *        interface of EPF
 *
 * Invoke to remove PCI endpoint function from the endpoint controller.
 * Releases the function number reserved by pci_epc_add_epf() and unlinks
 * the EPF from the controller's function list.
 */
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
			enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no = 0;

	if (IS_ERR_OR_NULL(epc) || !epf)
		return;

	/* list_lock protects both function_num_map and the pci_epf list. */
	mutex_lock(&epc->list_lock);
	if (type == PRIMARY_INTERFACE) {
		func_no = epf->func_no;
		list = &epf->list;
		epf->epc = NULL;
	} else {
		func_no = epf->sec_epc_func_no;
		list = &epf->sec_epc_list;
		epf->sec_epc = NULL;
	}
	clear_bit(func_no, &epc->function_num_map);
	list_del(list);
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
764 
/**
 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
 *		      connection with the Root Complex.
 * @epc: the EPC device which has established link with the host
 *
 * Invoke to Notify the EPF device that the EPC device has established a
 * connection with the Root Complex.
 */
void pci_epc_linkup(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	/*
	 * list_lock keeps the EPF list stable while iterating; each EPF's own
	 * lock is taken (nested inside) around its event callback.
	 */
	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_up)
			epf->event_ops->link_up(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);
790 
/**
 * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
 *			connection with the Root Complex.
 * @epc: the EPC device which has dropped the link with the host
 *
 * Invoke to Notify the EPF device that the EPC device has dropped the
 * connection with the Root Complex.
 */
void pci_epc_linkdown(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	/*
	 * list_lock keeps the EPF list stable while iterating; each EPF's own
	 * lock is taken (nested inside) around its event callback.
	 */
	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_down)
			epf->event_ops->link_down(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkdown);
816 
/**
 * pci_epc_init_notify() - Notify the EPF device that EPC device initialization
 *                         is completed.
 * @epc: the EPC device whose initialization is completed
 *
 * Invoke to Notify the EPF device that the EPC device's initialization
 * is completed.
 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_init)
			epf->event_ops->epc_init(epf);
		mutex_unlock(&epf->lock);
	}
	/*
	 * Record delivery so EPFs that bind later can be caught up via
	 * pci_epc_notify_pending_init().
	 */
	epc->init_complete = true;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
843 
844 /**
845  * pci_epc_notify_pending_init() - Notify the pending EPC device initialization
846  *                                 complete to the EPF device
847  * @epc: the EPC device whose initialization is pending to be notified
848  * @epf: the EPF device to be notified
849  *
850  * Invoke to notify the pending EPC device initialization complete to the EPF
851  * device. This is used to deliver the notification if the EPC initialization
852  * got completed before the EPF driver bind.
853  */
854 void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
855 {
856 	if (epc->init_complete) {
857 		mutex_lock(&epf->lock);
858 		if (epf->event_ops && epf->event_ops->epc_init)
859 			epf->event_ops->epc_init(epf);
860 		mutex_unlock(&epf->lock);
861 	}
862 }
863 EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);
864 
/**
 * pci_epc_deinit_notify() - Notify the EPF device about EPC deinitialization
 * @epc: the EPC device whose deinitialization is completed
 *
 * Invoke to notify the EPF device that the EPC deinitialization is completed.
 */
void pci_epc_deinit_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_deinit)
			epf->event_ops->epc_deinit(epf);
		mutex_unlock(&epf->lock);
	}
	/* Undo pci_epc_init_notify(): late-binding EPFs must not be caught up. */
	epc->init_complete = false;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_deinit_notify);
889 
/**
 * pci_epc_bus_master_enable_notify() - Notify the EPF device that the EPC
 *					device has received the Bus Master
 *					Enable event from the Root complex
 * @epc: the EPC device that received the Bus Master Enable event
 *
 * Notify the EPF device that the EPC device has generated the Bus Master Enable
 * event due to host setting the Bus Master Enable bit in the Command register.
 */
void pci_epc_bus_master_enable_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	/*
	 * list_lock keeps the EPF list stable while iterating; each EPF's own
	 * lock is taken (nested inside) around its event callback.
	 */
	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->bus_master_enable)
			epf->event_ops->bus_master_enable(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify);
916 
/**
 * pci_epc_destroy() - destroy the EPC device
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the PCI EPC device: removes the configfs group first,
 * releases the PCI domain number, then unregisters (and, once the last
 * reference is gone, frees) the device.
 */
void pci_epc_destroy(struct pci_epc *epc)
{
	/* Remove configfs entries first so no new bindings can appear. */
	pci_ep_cfs_remove_epc_group(epc->group);
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	/* Return the domain number allocated in __pci_epc_create(). */
	pci_bus_release_domain_nr(epc->dev.parent, epc->domain_nr);
#endif
	device_unregister(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);
932 
933 /**
934  * devm_pci_epc_destroy() - destroy the EPC device
935  * @dev: device that wants to destroy the EPC
936  * @epc: the EPC device that has to be destroyed
937  *
938  * Invoke to destroy the devres associated with this
939  * pci_epc and destroy the EPC device.
940  */
941 void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
942 {
943 	int r;
944 
945 	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
946 			   epc);
947 	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
948 }
949 EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
950 
/* Device release callback: frees the EPC allocated in __pci_epc_create(). */
static void pci_epc_release(struct device *dev)
{
	struct pci_epc *epc = to_pci_epc(dev);

	kfree(epc);
}
955 
/**
 * __pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 * Returns the new EPC on success and an ERR_PTR() on failure.
 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&epc->lock);
	mutex_init(&epc->list_lock);
	INIT_LIST_HEAD(&epc->pci_epf);

	/*
	 * After device_initialize(), the epc allocation is owned by the device
	 * refcount: error paths must use put_device() (which ends up in
	 * pci_epc_release()), not kfree().
	 */
	device_initialize(&epc->dev);
	epc->dev.class = &pci_epc_class;
	epc->dev.parent = dev;
	epc->dev.release = pci_epc_release;
	epc->ops = ops;

#ifdef CONFIG_PCI_DOMAINS_GENERIC
	epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);
#else
	/*
	 * TODO: If the architecture doesn't support generic PCI
	 * domains, then a custom implementation has to be used.
	 */
	WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n");
#endif

	/* The EPC device inherits the parent device's name. */
	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	put_device(&epc->dev);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);
1021 
1022 /**
1023  * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
1024  * @dev: device that is creating the new EPC
1025  * @ops: function pointers for performing EPC operations
1026  * @owner: the owner of the module that creates the EPC device
1027  *
1028  * Invoke to create a new EPC device and add it to pci_epc class.
1029  * While at that, it also associates the device with the pci_epc using devres.
1030  * On driver detach, release function is invoked on the devres data,
1031  * then, devres data is freed.
1032  */
1033 struct pci_epc *
1034 __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
1035 		      struct module *owner)
1036 {
1037 	struct pci_epc **ptr, *epc;
1038 
1039 	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
1040 	if (!ptr)
1041 		return ERR_PTR(-ENOMEM);
1042 
1043 	epc = __pci_epc_create(dev, ops, owner);
1044 	if (!IS_ERR(epc)) {
1045 		*ptr = epc;
1046 		devres_add(dev, ptr);
1047 	} else {
1048 		devres_free(ptr);
1049 	}
1050 
1051 	return epc;
1052 }
1053 EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
1054 
/* Register the "pci_epc" device class at module load time. */
static int __init pci_epc_init(void)
{
	return class_register(&pci_epc_class);
}
module_init(pci_epc_init);
1060 
/* Unregister the "pci_epc" device class at module unload time. */
static void __exit pci_epc_exit(void)
{
	class_unregister(&pci_epc_class);
}
module_exit(pci_epc_exit);
1066 
1067 MODULE_DESCRIPTION("PCI EPC Library");
1068 MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1069