// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/module.h>
#include <net/rtnetlink.h>

#include "iosm_ipc_imem.h"
#include "iosm_ipc_pcie.h"
#include "iosm_ipc_protocol.h"

MODULE_DESCRIPTION("IOSM Driver");
MODULE_LICENSE("GPL v2");

/* WWAN GUID */
static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92,
					 0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d);

static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie)
{
	/* Free the MSI resources. */
	ipc_release_irq(ipc_pcie);

	/* Free mapped doorbell scratchpad bus memory into CPU space. */
	iounmap(ipc_pcie->scratchpad);

	/* Free mapped IPC_REGS bus memory into CPU space. */
	iounmap(ipc_pcie->ipc_regs);

	/* Releases all PCI I/O and memory resources previously reserved by a
	 * successful call to pci_request_regions.  Call this function only
	 * after all use of the PCI regions has ceased.
	 */
	pci_release_regions(ipc_pcie->pci);
}

static void ipc_pcie_cleanup(struct iosm_pcie *ipc_pcie)
{
	/* Free the shared memory resources. */
	ipc_imem_cleanup(ipc_pcie->imem);

	ipc_pcie_resources_release(ipc_pcie);

	/* Signal to the system that the PCI device is not in use. */
	pci_disable_device(ipc_pcie->pci);
}

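/* Free the imem instance and the iosm_pcie context allocated during probe. */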
static void ipc_pcie_deinit(struct iosm_pcie *ipc_pcie)
{
	kfree(ipc_pcie->imem);
	kfree(ipc_pcie);
}

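/* PCI remove callback: tear down the IPC layers, release the PCIe resources
 * and free the driver context.
 */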
static void ipc_pcie_remove(struct pci_dev *pci)
{
	struct iosm_pcie *ipc_pcie = pci_get_drvdata(pci);

	ipc_pcie_cleanup(ipc_pcie);

	ipc_pcie_deinit(ipc_pcie);
}

static int ipc_pcie_resources_request(struct iosm_pcie *ipc_pcie)
{
	struct pci_dev *pci = ipc_pcie->pci;
	u32 cap = 0;
	int ret;

	/* Reserve PCI I/O and memory resources.
	 * Mark all PCI regions associated with PCI device pci as
	 * being reserved by owner IOSM_IPC.
	 */
	ret = pci_request_regions(pci, "IOSM_IPC");
	if (ret) {
		dev_err(ipc_pcie->dev, "failed pci request regions");
		goto pci_request_region_fail;
	}

	/* Reserve the doorbell IPC REGS memory resources.
	 * Remap the memory into CPU space. Arrange for the physical address
	 * (BAR) to be visible from this driver.
	 * pci_ioremap_bar() ensures that the memory is marked uncachable.
	 */
	ipc_pcie->ipc_regs = pci_ioremap_bar(pci, ipc_pcie->ipc_regs_bar_nr);

	if (!ipc_pcie->ipc_regs) {
		dev_err(ipc_pcie->dev, "IPC REGS ioremap error");
		ret = -EBUSY;
		goto ipc_regs_remap_fail;
	}

	/* Reserve the MMIO scratchpad memory resources.
	 * Remap the memory into CPU space. Arrange for the physical address
	 * (BAR) to be visible from this driver.
	 * pci_ioremap_bar() ensures that the memory is marked uncachable.
	 */
	ipc_pcie->scratchpad =
		pci_ioremap_bar(pci, ipc_pcie->scratchpad_bar_nr);

	if (!ipc_pcie->scratchpad) {
		dev_err(ipc_pcie->dev, "doorbell scratchpad ioremap error");
		ret = -EBUSY;
		goto scratch_remap_fail;
	}

	/* Install the irq handler triggered by CP. */
	ret = ipc_acquire_irq(ipc_pcie);
	if (ret) {
		dev_err(ipc_pcie->dev, "acquiring MSI irq failed!");
		goto irq_acquire_fail;
	}

	/* Enable bus-mastering for the IOSM IPC device. */
	pci_set_master(pci);

	/* Enable LTR if possible
	 * This is needed for L1.2!
	 */
	pcie_capability_read_dword(ipc_pcie->pci, PCI_EXP_DEVCAP2, &cap);
	if (cap & PCI_EXP_DEVCAP2_LTR)
		pcie_capability_set_word(ipc_pcie->pci, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_LTR_EN);

	dev_dbg(ipc_pcie->dev, "link between AP and CP is fully on");

	return ret;

irq_acquire_fail:
	iounmap(ipc_pcie->scratchpad);
scratch_remap_fail:
	iounmap(ipc_pcie->ipc_regs);
ipc_regs_remap_fail:
	pci_release_regions(pci);
pci_request_region_fail:
	return ret;
}

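/* Check whether ASPM L1 is currently enabled on the device itself or, when
 * parent is true, on its upstream root port.
 */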
bool ipc_pcie_check_aspm_enabled(struct iosm_pcie *ipc_pcie,
				 bool parent)
{
	struct pci_dev *pdev;
	u16 value = 0;
	u32 enabled;

	if (parent)
		pdev = ipc_pcie->pci->bus->self;
	else
		pdev = ipc_pcie->pci;

	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &value);
	enabled = value & PCI_EXP_LNKCTL_ASPMC;
	dev_dbg(ipc_pcie->dev, "ASPM L1: 0x%04X 0x%03X", pdev->device, value);

	return (enabled == PCI_EXP_LNKCTL_ASPM_L1 ||
		enabled == PCI_EXP_LNKCTL_ASPMC);
}

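/* Report whether the upstream root port sees the data link layer as active. */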
bool ipc_pcie_check_data_link_active(struct iosm_pcie *ipc_pcie)
{
	struct pci_dev *parent;
	u16 link_status = 0;

	if (!ipc_pcie->pci->bus || !ipc_pcie->pci->bus->self) {
		dev_err(ipc_pcie->dev, "root port not found");
		return false;
	}

	parent = ipc_pcie->pci->bus->self;

	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &link_status);
	dev_dbg(ipc_pcie->dev, "Link status: 0x%04X", link_status);

	return link_status & PCI_EXP_LNKSTA_DLLLA;
}

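/* Check whether the device or, when parent is true, its root port advertises
 * ASPM L1 support in the link capabilities.
 */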
static bool ipc_pcie_check_aspm_supported(struct iosm_pcie *ipc_pcie,
					  bool parent)
{
	struct pci_dev *pdev;
	u32 support;
	u32 cap = 0;

	if (parent)
		pdev = ipc_pcie->pci->bus->self;
	else
		pdev = ipc_pcie->pci;
	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &cap);
	support = u32_get_bits(cap, PCI_EXP_LNKCAP_ASPMS);
	if (support < PCI_EXP_LNKCTL_ASPM_L1) {
		dev_dbg(ipc_pcie->dev, "ASPM L1 not supported: 0x%04X",
			pdev->device);
		return false;
	}
	return true;
}

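/* If both the root port and the device support ASPM L1, log whether it is
 * currently enabled on each of them.
 */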
void ipc_pcie_config_aspm(struct iosm_pcie *ipc_pcie)
{
	bool parent_aspm_enabled, dev_aspm_enabled;

	/* Check whether both the root port and the child support ASPM L1. */
	if (!ipc_pcie_check_aspm_supported(ipc_pcie, true) ||
	    !ipc_pcie_check_aspm_supported(ipc_pcie, false))
		return;

	parent_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, true);
	dev_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, false);

	dev_dbg(ipc_pcie->dev, "ASPM parent: %s device: %s",
		parent_aspm_enabled ? "Enabled" : "Disabled",
		dev_aspm_enabled ? "Enabled" : "Disabled");
}

/* Initializes PCIe endpoint configuration */
static void ipc_pcie_config_init(struct iosm_pcie *ipc_pcie)
{
	/* BAR0 is used for doorbell */
	ipc_pcie->ipc_regs_bar_nr = IPC_DOORBELL_BAR0;

	/* update HW configuration */
	ipc_pcie->scratchpad_bar_nr = IPC_SCRATCHPAD_BAR2;
	ipc_pcie->doorbell_reg_offset = IPC_DOORBELL_CH_OFFSET;
	ipc_pcie->doorbell_write = IPC_WRITE_PTR_REG_0;
	ipc_pcie->doorbell_capture = IPC_CAPTURE_PTR_REG_0;
}

/* This will read the BIOS WWAN RTD3 settings:
 * D0L1.2/D3L2/Disabled
 */
static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
{
	enum ipc_pcie_sleep_state sleep_state = IPC_PCIE_D0L12;
	union acpi_object *object;
	acpi_handle handle_acpi;

	handle_acpi = ACPI_HANDLE(dev);
	if (!handle_acpi) {
		pr_debug("pci device is NOT ACPI supporting device\n");
		goto default_ret;
	}

	object = acpi_evaluate_dsm(handle_acpi, &wwan_acpi_guid, 0, 3, NULL);
	if (!object)
		goto default_ret;

	if (object->integer.value == 3)
		sleep_state = IPC_PCIE_D3L2;

	ACPI_FREE(object);

default_ret:
	return sleep_state;
}

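/* PCI probe callback: enable the device, request the PCIe resources and bring
 * up the shared memory (imem) layer.
 */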
static int ipc_pcie_probe(struct pci_dev *pci,
			  const struct pci_device_id *pci_id)
{
	struct iosm_pcie *ipc_pcie = kzalloc(sizeof(*ipc_pcie), GFP_KERNEL);
	int ret;

	pr_debug("Probing device 0x%X from the vendor 0x%X", pci_id->device,
		 pci_id->vendor);

	if (!ipc_pcie)
		goto ret_fail;

	/* Initialize ipc dbg component for the PCIe device */
	ipc_pcie->dev = &pci->dev;

	/* Set the driver specific data. */
	pci_set_drvdata(pci, ipc_pcie);

	/* Save the address of the PCI device configuration. */
	ipc_pcie->pci = pci;

	/* Update platform configuration */
	ipc_pcie_config_init(ipc_pcie);

	/* Initialize the device before it is used. Ask low-level code
	 * to enable I/O and memory. Wake up the device if it was suspended.
	 */
	if (pci_enable_device(pci)) {
		dev_err(ipc_pcie->dev, "failed to enable the AP PCIe device");
		/* If enabling the PCIe device fails, calling
		 * ipc_pcie_cleanup() will panic the system. Moreover,
		 * ipc_pcie_cleanup() is required to be called after
		 * ipc_imem_mount()
		 */
		goto pci_enable_fail;
	}

	ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
		/* Returning directly here would leak ipc_pcie and leave the
		 * device enabled; unwind through the error labels instead.
		 */
		goto set_mask_fail;
	}

	ipc_pcie_config_aspm(ipc_pcie);
	dev_dbg(ipc_pcie->dev, "PCIe device enabled.");

	/* Read WWAN RTD3 BIOS Setting */
	ipc_pcie->d3l2_support = ipc_pcie_read_bios_cfg(&pci->dev);

	ipc_pcie->suspend = 0;

	if (ipc_pcie_resources_request(ipc_pcie))
		goto resources_req_fail;

	/* Establish the link to the imem layer. */
	ipc_pcie->imem = ipc_imem_init(ipc_pcie, pci->device,
				       ipc_pcie->scratchpad, ipc_pcie->dev);
	if (!ipc_pcie->imem) {
		dev_err(ipc_pcie->dev, "failed to init imem");
		goto imem_init_fail;
	}

	return 0;

imem_init_fail:
	ipc_pcie_resources_release(ipc_pcie);
resources_req_fail:
set_mask_fail:
	pci_disable_device(pci);
pci_enable_fail:
	kfree(ipc_pcie);
ret_fail:
	return -EIO;
}

static const struct pci_device_id iosm_ipc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7560_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7360_ID) },
	{}
};
MODULE_DEVICE_TABLE(pci, iosm_ipc_ids);

/* Enter sleep in s2idle case */
static int __maybe_unused ipc_pcie_suspend_s2idle(struct iosm_pcie *ipc_pcie)
{
	ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_SLEEP);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(0, &ipc_pcie->suspend);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();

	ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, true);

	return 0;
}

/* Resume from sleep in s2idle case */
static int __maybe_unused ipc_pcie_resume_s2idle(struct iosm_pcie *ipc_pcie)
{
	ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_ACTIVE);

	ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, false);

	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();

	clear_bit(0, &ipc_pcie->suspend);

	/* Complete all memory stores after clearing bit. */
	smp_mb__after_atomic();
	return 0;
}

int __maybe_unused ipc_pcie_suspend(struct iosm_pcie *ipc_pcie)
{
	/* The HAL shall ask the shared memory layer whether D3 is allowed. */
	ipc_imem_pm_suspend(ipc_pcie->imem);

	dev_dbg(ipc_pcie->dev, "SUSPEND done");
	return 0;
}

int __maybe_unused ipc_pcie_resume(struct iosm_pcie *ipc_pcie)
{
	/* The HAL shall inform the shared memory layer that the device is
	 * active.
	 */
	ipc_imem_pm_resume(ipc_pcie->imem);

	dev_dbg(ipc_pcie->dev, "RESUME done");
	return 0;
}

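/* dev_pm_ops suspend callback: select the s2idle (D0L1.2) or D3L2 suspend
 * path based on the BIOS RTD3 setting read at probe time.
 */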
static int __maybe_unused ipc_pcie_suspend_cb(struct device *dev)
{
	struct iosm_pcie *ipc_pcie;
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);

	ipc_pcie = pci_get_drvdata(pdev);

	switch (ipc_pcie->d3l2_support) {
	case IPC_PCIE_D0L12:
		ipc_pcie_suspend_s2idle(ipc_pcie);
		break;
	case IPC_PCIE_D3L2:
		ipc_pcie_suspend(ipc_pcie);
		break;
	}

	return 0;
}

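/* dev_pm_ops resume callback: mirror the suspend path selected above. */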
static int __maybe_unused ipc_pcie_resume_cb(struct device *dev)
{
	struct iosm_pcie *ipc_pcie;
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);

	ipc_pcie = pci_get_drvdata(pdev);

	switch (ipc_pcie->d3l2_support) {
	case IPC_PCIE_D0L12:
		ipc_pcie_resume_s2idle(ipc_pcie);
		break;
	case IPC_PCIE_D3L2:
		ipc_pcie_resume(ipc_pcie);
		break;
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb, ipc_pcie_resume_cb);

static struct pci_driver iosm_ipc_driver = {
	.name = KBUILD_MODNAME,
	.probe = ipc_pcie_probe,
	.remove = ipc_pcie_remove,
	.driver = {
		.pm = &iosm_ipc_pm,
	},
	.id_table = iosm_ipc_ids,
};
module_pci_driver(iosm_ipc_driver);

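/* Map a data buffer for DMA towards or from CP and return the bus address
 * via *mapping.
 */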
int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
		      size_t size, dma_addr_t *mapping, int direction)
{
	if (ipc_pcie->pci) {
		*mapping = dma_map_single(&ipc_pcie->pci->dev, data, size,
					  direction);
		if (dma_mapping_error(&ipc_pcie->pci->dev, *mapping)) {
			dev_err(ipc_pcie->dev, "dma mapping failed");
			return -EINVAL;
		}
	}
	return 0;
}

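/* Release a DMA mapping created by ipc_pcie_addr_map(). */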
void ipc_pcie_addr_unmap(struct iosm_pcie *ipc_pcie, size_t size,
			 dma_addr_t mapping, int direction)
{
	if (!mapping)
		return;
	if (ipc_pcie->pci)
		dma_unmap_single(&ipc_pcie->pci->dev, mapping, size, direction);
}

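/* Allocate an skb for local use; no DMA mapping is set up for it. */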
struct sk_buff *ipc_pcie_alloc_local_skb(struct iosm_pcie *ipc_pcie,
					 gfp_t flags, size_t size)
{
	struct sk_buff *skb;

	if (!ipc_pcie || !size) {
		pr_err("invalid pcie object or size");
		return NULL;
	}

	skb = __netdev_alloc_skb(NULL, size, flags);
	if (!skb)
		return NULL;

	IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
	IPC_CB(skb)->mapping = 0;

	return skb;
}

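/* Allocate an skb with the requested headroom and DMA-map its data buffer;
 * the mapping details are stored in the skb control buffer.
 */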
struct sk_buff *ipc_pcie_alloc_skb(struct iosm_pcie *ipc_pcie, size_t size,
				   gfp_t flags, dma_addr_t *mapping,
				   int direction, size_t headroom)
{
	struct sk_buff *skb = ipc_pcie_alloc_local_skb(ipc_pcie, flags,
						       size + headroom);
	if (!skb)
		return NULL;

	if (headroom)
		skb_reserve(skb, headroom);

	if (ipc_pcie_addr_map(ipc_pcie, skb->data, size, mapping, direction)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));

	/* Store the mapping address in skb scratch pad for later usage */
	IPC_CB(skb)->mapping = *mapping;
	IPC_CB(skb)->direction = direction;
	IPC_CB(skb)->len = size;

	return skb;
}

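/* Unmap and free an skb that was allocated with ipc_pcie_alloc_skb(). */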
void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
{
	if (!skb)
		return;

	ipc_pcie_addr_unmap(ipc_pcie, IPC_CB(skb)->len, IPC_CB(skb)->mapping,
			    IPC_CB(skb)->direction);
	IPC_CB(skb)->mapping = 0;
	dev_kfree_skb(skb);
}