// SPDX-License-Identifier: GPL-2.0
/*
 * PCI EPF driver for MHI Endpoint devices
 *
 * Copyright (C) 2023 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/dmaengine.h>
#include <linux/mhi_ep.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

#define MHI_VERSION_1_0 0x01000000

#define to_epf_mhi(cntrl) container_of(cntrl, struct pci_epf_mhi, cntrl)

/* Platform specific flags */
#define MHI_EPF_USE_DMA BIT(0)

struct pci_epf_mhi_dma_transfer {
	struct pci_epf_mhi *epf_mhi;
	struct mhi_ep_buf_info buf_info;
	struct list_head node;
	dma_addr_t paddr;
	enum dma_data_direction dir;
	size_t size;
};

struct pci_epf_mhi_ep_info {
	const struct mhi_ep_cntrl_config *config;
	struct pci_epf_header *epf_header;
	enum pci_barno bar_num;
	u32 epf_flags;
	u32 msi_count;
	u32 mru;
	u32 flags;
};

#define MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, direction)	\
	{							\
		.num = ch_num,					\
		.name = ch_name,				\
		.dir = direction,				\
	}

#define MHI_EP_CHANNEL_CONFIG_UL(ch_num, ch_name)		\
	MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_TO_DEVICE)

#define MHI_EP_CHANNEL_CONFIG_DL(ch_num, ch_name)		\
	MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_FROM_DEVICE)

static const struct mhi_ep_channel_config mhi_v1_channels[] = {
	MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK"),
	MHI_EP_CHANNEL_CONFIG_DL(1, "LOOPBACK"),
	MHI_EP_CHANNEL_CONFIG_UL(2, "SAHARA"),
	MHI_EP_CHANNEL_CONFIG_DL(3, "SAHARA"),
	MHI_EP_CHANNEL_CONFIG_UL(4, "DIAG"),
	MHI_EP_CHANNEL_CONFIG_DL(5, "DIAG"),
	MHI_EP_CHANNEL_CONFIG_UL(6, "SSR"),
	MHI_EP_CHANNEL_CONFIG_DL(7, "SSR"),
	MHI_EP_CHANNEL_CONFIG_UL(8, "QDSS"),
	MHI_EP_CHANNEL_CONFIG_DL(9, "QDSS"),
	MHI_EP_CHANNEL_CONFIG_UL(10, "EFS"),
	MHI_EP_CHANNEL_CONFIG_DL(11, "EFS"),
	MHI_EP_CHANNEL_CONFIG_UL(12, "MBIM"),
	MHI_EP_CHANNEL_CONFIG_DL(13, "MBIM"),
	MHI_EP_CHANNEL_CONFIG_UL(14, "QMI"),
	MHI_EP_CHANNEL_CONFIG_DL(15, "QMI"),
	MHI_EP_CHANNEL_CONFIG_UL(16, "QMI"),
	MHI_EP_CHANNEL_CONFIG_DL(17, "QMI"),
	MHI_EP_CHANNEL_CONFIG_UL(18, "IP-CTRL-1"),
	MHI_EP_CHANNEL_CONFIG_DL(19, "IP-CTRL-1"),
	MHI_EP_CHANNEL_CONFIG_UL(20, "IPCR"),
	MHI_EP_CHANNEL_CONFIG_DL(21, "IPCR"),
	MHI_EP_CHANNEL_CONFIG_UL(32, "DUN"),
	MHI_EP_CHANNEL_CONFIG_DL(33, "DUN"),
	MHI_EP_CHANNEL_CONFIG_UL(46, "IP_SW0"),
	MHI_EP_CHANNEL_CONFIG_DL(47, "IP_SW0"),
};

static const struct mhi_ep_cntrl_config mhi_v1_config = {
	.max_channels = 128,
	.num_channels = ARRAY_SIZE(mhi_v1_channels),
	.ch_cfg = mhi_v1_channels,
	.mhi_version = MHI_VERSION_1_0,
};

static struct pci_epf_header sdx55_header = {
	.vendorid = PCI_VENDOR_ID_QCOM,
	.deviceid = 0x0306,
	.baseclass_code = PCI_BASE_CLASS_COMMUNICATION,
	.subclass_code = PCI_CLASS_COMMUNICATION_MODEM & 0xff,
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

static const struct pci_epf_mhi_ep_info sdx55_info = {
	.config = &mhi_v1_config,
	.epf_header = &sdx55_header,
	.bar_num = BAR_0,
	.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
	.msi_count = 32,
	.mru = 0x8000,
};

static struct pci_epf_header sm8450_header = {
	.vendorid = PCI_VENDOR_ID_QCOM,
	.deviceid = 0x0306,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

static const struct pci_epf_mhi_ep_info sm8450_info = {
	.config = &mhi_v1_config,
	.epf_header = &sm8450_header,
	.bar_num = BAR_0,
	.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
	.msi_count = 32,
	.mru = 0x8000,
	.flags = MHI_EPF_USE_DMA,
};

static struct pci_epf_header sa8775p_header = {
	.vendorid = PCI_VENDOR_ID_QCOM,
	.deviceid = 0x0306,               /* FIXME: Update deviceid for sa8775p EP */
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

static const struct pci_epf_mhi_ep_info sa8775p_info = {
	.config = &mhi_v1_config,
	.epf_header = &sa8775p_header,
	.bar_num = BAR_0,
	.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
	.msi_count = 32,
	.mru = 0x8000,
};

struct pci_epf_mhi {
	const struct pci_epc_features *epc_features;
	const struct pci_epf_mhi_ep_info *info;
	struct mhi_ep_cntrl mhi_cntrl;
	struct pci_epf *epf;
	struct mutex lock;
	void __iomem *mmio;
	resource_size_t mmio_phys;
	struct dma_chan *dma_chan_tx;
	struct dma_chan *dma_chan_rx;
	struct workqueue_struct *dma_wq;
	struct work_struct dma_work;
	struct list_head dma_list;
	spinlock_t list_lock;
	u32 mmio_size;
	int irq;
};

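/*
 * Return the offset of 'addr' within the address alignment boundary required
 * by the endpoint controller (epc_features->align). The outbound window is
 * mapped at the aligned-down address and this offset is added back to reach
 * the exact host address.
 */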
static size_t get_align_offset(struct pci_epf_mhi *epf_mhi, u64 addr)
{
	return addr & (epf_mhi->epc_features->align - 1);
}

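/*
 * Allocate a region from the endpoint controller's outbound address space and
 * map it to the (alignment adjusted) host PCI address. On success, *paddr and
 * *vaddr point to the exact host address requested, i.e. past the alignment
 * offset.
 */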
static int __pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				 phys_addr_t *paddr, void __iomem **vaddr,
				 size_t offset, size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;
	struct pci_epc *epc = epf->epc;
	int ret;

	*vaddr = pci_epc_mem_alloc_addr(epc, paddr, size + offset);
	if (!*vaddr)
		return -ENOMEM;

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, *paddr,
			       pci_addr - offset, size + offset);
	if (ret) {
		pci_epc_mem_free_addr(epc, *paddr, *vaddr, size + offset);
		return ret;
	}

	*paddr = *paddr + offset;
	*vaddr = *vaddr + offset;

	return 0;
}

static int pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				 phys_addr_t *paddr, void __iomem **vaddr,
				 size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, pci_addr);

	return __pci_epf_mhi_alloc_map(mhi_cntrl, pci_addr, paddr, vaddr,
				      offset, size);
}

static void __pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl,
				     u64 pci_addr, phys_addr_t paddr,
				     void __iomem *vaddr, size_t offset,
				     size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;
	struct pci_epc *epc = epf->epc;

	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, paddr - offset);
	pci_epc_mem_free_addr(epc, paddr - offset, vaddr - offset,
			      size + offset);
}

static void pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				   phys_addr_t paddr, void __iomem *vaddr,
				   size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, pci_addr);

	__pci_epf_mhi_unmap_free(mhi_cntrl, pci_addr, paddr, vaddr, offset,
				 size);
}

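/* Notify the host by raising an MSI for the given MHI vector. */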
static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;
	struct pci_epc *epc = epf->epc;

	/*
	 * MHI supplies 0-based MSI vectors but the API expects the vector
	 * number to start from 1, so increment the vector by 1 here.
	 */
	pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_IRQ_MSI,
			  vector + 1);
}

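/*
 * Read from host memory over the iATU (outbound window): map the host buffer
 * into the endpoint's address space, copy it with memcpy_fromio() and unmap
 * it again. This is the default read path and also serves as the fallback for
 * small transfers when eDMA is in use.
 */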
static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
				 struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
	void __iomem *tre_buf;
	phys_addr_t tre_phys;
	int ret;

	mutex_lock(&epf_mhi->lock);

	ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
				      &tre_buf, offset, buf_info->size);
	if (ret) {
		mutex_unlock(&epf_mhi->lock);
		return ret;
	}

	memcpy_fromio(buf_info->dev_addr, tre_buf, buf_info->size);

	__pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
				 tre_buf, offset, buf_info->size);

	mutex_unlock(&epf_mhi->lock);

	if (buf_info->cb)
		buf_info->cb(buf_info);

	return 0;
}

static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
				  struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
	void __iomem *tre_buf;
	phys_addr_t tre_phys;
	int ret;

	mutex_lock(&epf_mhi->lock);

	ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
				      &tre_buf, offset, buf_info->size);
	if (ret) {
		mutex_unlock(&epf_mhi->lock);
		return ret;
	}

	memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size);

	__pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
				 tre_buf, offset, buf_info->size);

	mutex_unlock(&epf_mhi->lock);

	if (buf_info->cb)
		buf_info->cb(buf_info);

	return 0;
}

static void pci_epf_mhi_dma_callback(void *param)
{
	complete(param);
}

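/*
 * Synchronous read from host memory using the embedded DMA (eDMA) engine.
 * Transfers smaller than 4 KB are handed off to the iATU path instead.
 */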
static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
				 struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct dma_chan *chan = epf_mhi->dma_chan_rx;
	struct device *dev = &epf_mhi->epf->dev;
	DECLARE_COMPLETION_ONSTACK(complete);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config = {};
	dma_cookie_t cookie;
	dma_addr_t dst_addr;
	int ret;

	if (buf_info->size < SZ_4K)
		return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);

	mutex_lock(&epf_mhi->lock);

	config.direction = DMA_DEV_TO_MEM;
	config.src_addr = buf_info->host_addr;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(dev, "Failed to configure DMA channel\n");
		goto err_unlock;
	}

	dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
				  DMA_FROM_DEVICE);
	ret = dma_mapping_error(dma_dev, dst_addr);
	if (ret) {
		dev_err(dev, "Failed to map remote memory\n");
		goto err_unlock;
	}

	desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
					   DMA_DEV_TO_MEM,
					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "Failed to prepare DMA\n");
		ret = -EIO;
		goto err_unmap;
	}

	desc->callback = pci_epf_mhi_dma_callback;
	desc->callback_param = &complete;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA submit\n");
		goto err_unmap;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(dev, "DMA transfer timeout\n");
		dmaengine_terminate_sync(chan);
		ret = -ETIMEDOUT;
	}

err_unmap:
	dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
err_unlock:
	mutex_unlock(&epf_mhi->lock);

	return ret;
}

static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
				  struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct dma_chan *chan = epf_mhi->dma_chan_tx;
	struct device *dev = &epf_mhi->epf->dev;
	DECLARE_COMPLETION_ONSTACK(complete);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config = {};
	dma_cookie_t cookie;
	dma_addr_t src_addr;
	int ret;

	if (buf_info->size < SZ_4K)
		return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);

	mutex_lock(&epf_mhi->lock);

	config.direction = DMA_MEM_TO_DEV;
	config.dst_addr = buf_info->host_addr;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(dev, "Failed to configure DMA channel\n");
		goto err_unlock;
	}

	src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
				  DMA_TO_DEVICE);
	ret = dma_mapping_error(dma_dev, src_addr);
	if (ret) {
		dev_err(dev, "Failed to map remote memory\n");
		goto err_unlock;
	}

	desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
					   DMA_MEM_TO_DEV,
					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "Failed to prepare DMA\n");
		ret = -EIO;
		goto err_unmap;
	}

	desc->callback = pci_epf_mhi_dma_callback;
	desc->callback_param = &complete;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA submit\n");
		goto err_unmap;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(dev, "DMA transfer timeout\n");
		dmaengine_terminate_sync(chan);
		ret = -ETIMEDOUT;
	}

err_unmap:
	dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
err_unlock:
	mutex_unlock(&epf_mhi->lock);

	return ret;
}

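/*
 * Workqueue handler for completed asynchronous DMA transfers: unmap each
 * buffer and invoke the MHI stack's completion callback in process context,
 * outside of the DMA engine's callback context.
 */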
static void pci_epf_mhi_dma_worker(struct work_struct *work)
{
	struct pci_epf_mhi *epf_mhi = container_of(work, struct pci_epf_mhi, dma_work);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct pci_epf_mhi_dma_transfer *itr, *tmp;
	struct mhi_ep_buf_info *buf_info;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&epf_mhi->list_lock, flags);
	list_splice_tail_init(&epf_mhi->dma_list, &head);
	spin_unlock_irqrestore(&epf_mhi->list_lock, flags);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dma_unmap_single(dma_dev, itr->paddr, itr->size, itr->dir);
		buf_info = &itr->buf_info;
		buf_info->cb(buf_info);
		kfree(itr);
	}
}

static void pci_epf_mhi_dma_async_callback(void *param)
{
	struct pci_epf_mhi_dma_transfer *transfer = param;
	struct pci_epf_mhi *epf_mhi = transfer->epf_mhi;

	spin_lock(&epf_mhi->list_lock);
	list_add_tail(&transfer->node, &epf_mhi->dma_list);
	spin_unlock(&epf_mhi->list_lock);

	queue_work(epf_mhi->dma_wq, &epf_mhi->dma_work);
}
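
/*
 * Asynchronous read from host memory using eDMA. The descriptor is queued and
 * the function returns immediately; completion is reported to the MHI stack
 * from the DMA callback via the dma_wq workqueue.
 */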
static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl,
				       struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct pci_epf_mhi_dma_transfer *transfer = NULL;
	struct dma_chan *chan = epf_mhi->dma_chan_rx;
	struct device *dev = &epf_mhi->epf->dev;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config = {};
	dma_cookie_t cookie;
	dma_addr_t dst_addr;
	int ret;

	mutex_lock(&epf_mhi->lock);

	config.direction = DMA_DEV_TO_MEM;
	config.src_addr = buf_info->host_addr;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(dev, "Failed to configure DMA channel\n");
		goto err_unlock;
	}

	dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
				  DMA_FROM_DEVICE);
	ret = dma_mapping_error(dma_dev, dst_addr);
	if (ret) {
		dev_err(dev, "Failed to map remote memory\n");
		goto err_unlock;
	}

	desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
					   DMA_DEV_TO_MEM,
					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "Failed to prepare DMA\n");
		ret = -EIO;
		goto err_unmap;
	}

	transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
	if (!transfer) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	transfer->epf_mhi = epf_mhi;
	transfer->paddr = dst_addr;
	transfer->size = buf_info->size;
	transfer->dir = DMA_FROM_DEVICE;
	memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));

	desc->callback = pci_epf_mhi_dma_async_callback;
	desc->callback_param = transfer;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA submit\n");
		goto err_free_transfer;
	}

	dma_async_issue_pending(chan);

	goto err_unlock;

err_free_transfer:
	kfree(transfer);
err_unmap:
	dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
err_unlock:
	mutex_unlock(&epf_mhi->lock);

	return ret;
}

static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl,
					struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct pci_epf_mhi_dma_transfer *transfer = NULL;
	struct dma_chan *chan = epf_mhi->dma_chan_tx;
	struct device *dev = &epf_mhi->epf->dev;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config = {};
	dma_cookie_t cookie;
	dma_addr_t src_addr;
	int ret;

	mutex_lock(&epf_mhi->lock);

	config.direction = DMA_MEM_TO_DEV;
	config.dst_addr = buf_info->host_addr;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(dev, "Failed to configure DMA channel\n");
		goto err_unlock;
	}

	src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
				  DMA_TO_DEVICE);
	ret = dma_mapping_error(dma_dev, src_addr);
	if (ret) {
		dev_err(dev, "Failed to map remote memory\n");
		goto err_unlock;
	}

	desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
					   DMA_MEM_TO_DEV,
					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "Failed to prepare DMA\n");
		ret = -EIO;
		goto err_unmap;
	}

	transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
	if (!transfer) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	transfer->epf_mhi = epf_mhi;
	transfer->paddr = src_addr;
	transfer->size = buf_info->size;
	transfer->dir = DMA_TO_DEVICE;
	memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));

	desc->callback = pci_epf_mhi_dma_async_callback;
	desc->callback_param = transfer;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA submit\n");
		goto err_free_transfer;
	}

	dma_async_issue_pending(chan);

	goto err_unlock;

err_free_transfer:
	kfree(transfer);
err_unmap:
	dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
err_unlock:
	mutex_unlock(&epf_mhi->lock);

	return ret;
}

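/*
 * DMA channel filter data and callback: match only channels that belong to the
 * endpoint controller's DMA device and support the requested direction.
 */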
struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};

static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps;

	memset(&caps, 0, sizeof(caps));
	dma_get_slave_caps(chan, &caps);

	return chan->device->dev == filter->dev &&
	       (filter->dma_mask & caps.directions);
}

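/*
 * Request the TX (MEM_TO_DEV) and RX (DEV_TO_MEM) eDMA channels from the
 * endpoint controller and set up the workqueue used to complete asynchronous
 * transfers.
 */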
static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
{
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct device *dev = &epf_mhi->epf->dev;
	struct epf_dma_filter filter;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	filter.dev = dma_dev;
	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	epf_mhi->dma_chan_tx = dma_request_channel(mask, pci_epf_mhi_filter,
						   &filter);
	if (IS_ERR_OR_NULL(epf_mhi->dma_chan_tx)) {
		dev_err(dev, "Failed to request tx channel\n");
		return -ENODEV;
	}

	filter.dma_mask = BIT(DMA_DEV_TO_MEM);
	epf_mhi->dma_chan_rx = dma_request_channel(mask, pci_epf_mhi_filter,
						   &filter);
	if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
		dev_err(dev, "Failed to request rx channel\n");
		ret = -ENODEV;
		goto err_release_tx;
	}

	epf_mhi->dma_wq = alloc_workqueue("pci_epf_mhi_dma_wq", 0, 0);
	if (!epf_mhi->dma_wq) {
		ret = -ENOMEM;
		goto err_release_rx;
	}

	INIT_LIST_HEAD(&epf_mhi->dma_list);
	INIT_WORK(&epf_mhi->dma_work, pci_epf_mhi_dma_worker);
	spin_lock_init(&epf_mhi->list_lock);

	return 0;

err_release_rx:
	dma_release_channel(epf_mhi->dma_chan_rx);
	epf_mhi->dma_chan_rx = NULL;
err_release_tx:
	dma_release_channel(epf_mhi->dma_chan_tx);
	epf_mhi->dma_chan_tx = NULL;

	return ret;
}

static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
{
	destroy_workqueue(epf_mhi->dma_wq);
	dma_release_channel(epf_mhi->dma_chan_tx);
	dma_release_channel(epf_mhi->dma_chan_rx);
	epf_mhi->dma_chan_tx = NULL;
	epf_mhi->dma_chan_rx = NULL;
}

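/*
 * EPC "core init" event handler: expose the MHI MMIO region through the
 * configured BAR, program the MSI capability and the configuration space
 * header, and cache the endpoint controller features.
 */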
static int pci_epf_mhi_core_init(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	int ret;

	epf_bar->phys_addr = epf_mhi->mmio_phys;
	epf_bar->size = epf_mhi->mmio_size;
	epf_bar->barno = info->bar_num;
	epf_bar->flags = info->epf_flags;
	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
	if (ret) {
		dev_err(dev, "Failed to set BAR: %d\n", ret);
		return ret;
	}

	ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
			      order_base_2(info->msi_count));
	if (ret) {
		dev_err(dev, "Failed to set MSI configuration: %d\n", ret);
		return ret;
	}

	ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
				   epf->header);
	if (ret) {
		dev_err(dev, "Failed to set Configuration header: %d\n", ret);
		return ret;
	}

	epf_mhi->epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epf_mhi->epc_features)
		return -ENODATA;

	return 0;
}

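/*
 * EPC "link up" event handler: set up eDMA (if the platform uses it), populate
 * the MHI endpoint controller callbacks and register the controller with the
 * MHI EP stack.
 */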
static int pci_epf_mhi_link_up(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	int ret;

	if (info->flags & MHI_EPF_USE_DMA) {
		ret = pci_epf_mhi_dma_init(epf_mhi);
		if (ret) {
			dev_err(dev, "Failed to initialize DMA: %d\n", ret);
			return ret;
		}
	}

	mhi_cntrl->mmio = epf_mhi->mmio;
	mhi_cntrl->irq = epf_mhi->irq;
	mhi_cntrl->mru = info->mru;

	/* Assign the parent device of the endpoint controller as the MHI controller device */
	mhi_cntrl->cntrl_dev = epc->dev.parent;
	mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
	mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
	mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
	mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
	mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
	if (info->flags & MHI_EPF_USE_DMA) {
		mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
		mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
		mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
		mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
	}

	/* Register the MHI EP controller */
	ret = mhi_ep_register_controller(mhi_cntrl, info->config);
	if (ret) {
		dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
		if (info->flags & MHI_EPF_USE_DMA)
			pci_epf_mhi_dma_deinit(epf_mhi);
		return ret;
	}

	return 0;
}

static int pci_epf_mhi_link_down(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;

	if (mhi_cntrl->mhi_dev) {
		mhi_ep_power_down(mhi_cntrl);
		if (info->flags & MHI_EPF_USE_DMA)
			pci_epf_mhi_dma_deinit(epf_mhi);
		mhi_ep_unregister_controller(mhi_cntrl);
	}

	return 0;
}

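/*
 * EPC "Bus Master Enable" event handler: once the host enables bus mastering,
 * power up the MHI endpoint stack if it is registered but not yet enabled.
 */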
static int pci_epf_mhi_bme(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
	struct device *dev = &epf->dev;
	int ret;

	/*
	 * Power up the MHI EP stack if the controller has been registered
	 * (link is up) and the stack is still in the powered-down state.
	 */
	if (!mhi_cntrl->enabled && mhi_cntrl->mhi_dev) {
		ret = mhi_ep_power_up(mhi_cntrl);
		if (ret) {
			dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
			if (info->flags & MHI_EPF_USE_DMA)
				pci_epf_mhi_dma_deinit(epf_mhi);
			mhi_ep_unregister_controller(mhi_cntrl);
		}
	}

	return 0;
}

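/*
 * Bind to the endpoint controller: retrieve the MMIO region and doorbell IRQ
 * provided by the EPC platform device and map the MMIO space.
 */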
static int pci_epf_mhi_bind(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	struct platform_device *pdev = to_platform_device(epc->dev.parent);
	struct resource *res;
	int ret;

	/* Get MMIO base address from Endpoint controller */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
	if (!res)
		return -ENODEV;

	epf_mhi->mmio_phys = res->start;
	epf_mhi->mmio_size = resource_size(res);

	epf_mhi->mmio = ioremap(epf_mhi->mmio_phys, epf_mhi->mmio_size);
	if (!epf_mhi->mmio)
		return -ENOMEM;

	ret = platform_get_irq_byname(pdev, "doorbell");
	if (ret < 0) {
		iounmap(epf_mhi->mmio);
		return ret;
	}

	epf_mhi->irq = ret;

	return 0;
}

static void pci_epf_mhi_unbind(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
	struct pci_epc *epc = epf->epc;

	/*
	 * Forcefully power down the MHI EP stack. The only way to bring it
	 * back to a working state after a subsequent bind is to receive BME
	 * (Bus Master Enable) from the host again.
	 */
	if (mhi_cntrl->mhi_dev) {
		mhi_ep_power_down(mhi_cntrl);
		if (info->flags & MHI_EPF_USE_DMA)
			pci_epf_mhi_dma_deinit(epf_mhi);
		mhi_ep_unregister_controller(mhi_cntrl);
	}

	iounmap(epf_mhi->mmio);
	pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
}

static const struct pci_epc_event_ops pci_epf_mhi_event_ops = {
	.core_init = pci_epf_mhi_core_init,
	.link_up = pci_epf_mhi_link_up,
	.link_down = pci_epf_mhi_link_down,
	.bme = pci_epf_mhi_bme,
};

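/* Allocate per-function driver state and hook up the EPC event handlers. */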
static int pci_epf_mhi_probe(struct pci_epf *epf,
			     const struct pci_epf_device_id *id)
{
	struct pci_epf_mhi_ep_info *info =
			(struct pci_epf_mhi_ep_info *)id->driver_data;
	struct pci_epf_mhi *epf_mhi;
	struct device *dev = &epf->dev;

	epf_mhi = devm_kzalloc(dev, sizeof(*epf_mhi), GFP_KERNEL);
	if (!epf_mhi)
		return -ENOMEM;

	epf->header = info->epf_header;
	epf_mhi->info = info;
	epf_mhi->epf = epf;

	epf->event_ops = &pci_epf_mhi_event_ops;

	mutex_init(&epf_mhi->lock);

	epf_set_drvdata(epf, epf_mhi);

	return 0;
}

static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
	{ .name = "pci_epf_mhi_sa8775p", .driver_data = (kernel_ulong_t)&sa8775p_info },
	{ .name = "pci_epf_mhi_sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
	{ .name = "pci_epf_mhi_sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
	{},
};

static const struct pci_epf_ops pci_epf_mhi_ops = {
	.unbind	= pci_epf_mhi_unbind,
	.bind	= pci_epf_mhi_bind,
};

static struct pci_epf_driver pci_epf_mhi_driver = {
	.driver.name	= "pci_epf_mhi",
	.probe		= pci_epf_mhi_probe,
	.id_table	= pci_epf_mhi_ids,
	.ops		= &pci_epf_mhi_ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_mhi_init(void)
{
	return pci_epf_register_driver(&pci_epf_mhi_driver);
}
module_init(pci_epf_mhi_init);

static void __exit pci_epf_mhi_exit(void)
{
	pci_epf_unregister_driver(&pci_epf_mhi_driver);
}
module_exit(pci_epf_mhi_exit);

MODULE_DESCRIPTION("PCI EPF driver for MHI Endpoint devices");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_LICENSE("GPL");