xref: /linux/drivers/misc/cxl/cxllib.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
23ced8d73SChristophe Lombard /*
33ced8d73SChristophe Lombard  * Copyright 2017 IBM Corp.
43ced8d73SChristophe Lombard  */
53ced8d73SChristophe Lombard 
63ced8d73SChristophe Lombard #include <linux/hugetlb.h>
73ced8d73SChristophe Lombard #include <linux/sched/mm.h>
8*d8d2af70SChristophe Leroy #include <asm/opal-api.h>
93ced8d73SChristophe Lombard #include <asm/pnv-pci.h>
103ced8d73SChristophe Lombard #include <misc/cxllib.h>
113ced8d73SChristophe Lombard 
123ced8d73SChristophe Lombard #include "cxl.h"
133ced8d73SChristophe Lombard 
143ced8d73SChristophe Lombard #define CXL_INVALID_DRA                 ~0ull
153ced8d73SChristophe Lombard #define CXL_DUMMY_READ_SIZE             128
163ced8d73SChristophe Lombard #define CXL_DUMMY_READ_ALIGN            8
173ced8d73SChristophe Lombard #define CXL_CAPI_WINDOW_START           0x2000000000000ull
183ced8d73SChristophe Lombard #define CXL_CAPI_WINDOW_LOG_SIZE        48
193ced8d73SChristophe Lombard #define CXL_XSL_CONFIG_CURRENT_VERSION  CXL_XSL_CONFIG_VERSION1
203ced8d73SChristophe Lombard 
213ced8d73SChristophe Lombard 
cxllib_slot_is_supported(struct pci_dev * dev,unsigned long flags)223ced8d73SChristophe Lombard bool cxllib_slot_is_supported(struct pci_dev *dev, unsigned long flags)
233ced8d73SChristophe Lombard {
243ced8d73SChristophe Lombard 	int rc;
253ced8d73SChristophe Lombard 	u32 phb_index;
263ced8d73SChristophe Lombard 	u64 chip_id, capp_unit_id;
273ced8d73SChristophe Lombard 
283ced8d73SChristophe Lombard 	/* No flags currently supported */
293ced8d73SChristophe Lombard 	if (flags)
303ced8d73SChristophe Lombard 		return false;
313ced8d73SChristophe Lombard 
323ced8d73SChristophe Lombard 	if (!cpu_has_feature(CPU_FTR_HVMODE))
333ced8d73SChristophe Lombard 		return false;
343ced8d73SChristophe Lombard 
353ced8d73SChristophe Lombard 	if (!cxl_is_power9())
363ced8d73SChristophe Lombard 		return false;
373ced8d73SChristophe Lombard 
383ced8d73SChristophe Lombard 	if (cxl_slot_is_switched(dev))
393ced8d73SChristophe Lombard 		return false;
403ced8d73SChristophe Lombard 
413ced8d73SChristophe Lombard 	/* on p9, some pci slots are not connected to a CAPP unit */
423ced8d73SChristophe Lombard 	rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id);
433ced8d73SChristophe Lombard 	if (rc)
443ced8d73SChristophe Lombard 		return false;
453ced8d73SChristophe Lombard 
463ced8d73SChristophe Lombard 	return true;
473ced8d73SChristophe Lombard }
483ced8d73SChristophe Lombard EXPORT_SYMBOL_GPL(cxllib_slot_is_supported);
493ced8d73SChristophe Lombard 
/*
 * Physical address of the CAPP dummy read buffer, allocated lazily on
 * first use in cxllib_get_xsl_config(); dra_mutex serializes the
 * one-time allocation. CXL_INVALID_DRA means "not allocated yet".
 */
static DEFINE_MUTEX(dra_mutex);
static u64 dummy_read_addr = CXL_INVALID_DRA;
523ced8d73SChristophe Lombard 
allocate_dummy_read_buf(void)533ced8d73SChristophe Lombard static int allocate_dummy_read_buf(void)
543ced8d73SChristophe Lombard {
553ced8d73SChristophe Lombard 	u64 buf, vaddr;
563ced8d73SChristophe Lombard 	size_t buf_size;
573ced8d73SChristophe Lombard 
583ced8d73SChristophe Lombard 	/*
593ced8d73SChristophe Lombard 	 * Dummy read buffer is 128-byte long, aligned on a
603ced8d73SChristophe Lombard 	 * 256-byte boundary and we need the physical address.
613ced8d73SChristophe Lombard 	 */
623ced8d73SChristophe Lombard 	buf_size = CXL_DUMMY_READ_SIZE + (1ull << CXL_DUMMY_READ_ALIGN);
633ced8d73SChristophe Lombard 	buf = (u64) kzalloc(buf_size, GFP_KERNEL);
643ced8d73SChristophe Lombard 	if (!buf)
653ced8d73SChristophe Lombard 		return -ENOMEM;
663ced8d73SChristophe Lombard 
673ced8d73SChristophe Lombard 	vaddr = (buf + (1ull << CXL_DUMMY_READ_ALIGN) - 1) &
683ced8d73SChristophe Lombard 					(~0ull << CXL_DUMMY_READ_ALIGN);
693ced8d73SChristophe Lombard 
703ced8d73SChristophe Lombard 	WARN((vaddr + CXL_DUMMY_READ_SIZE) > (buf + buf_size),
713ced8d73SChristophe Lombard 		"Dummy read buffer alignment issue");
723ced8d73SChristophe Lombard 	dummy_read_addr = virt_to_phys((void *) vaddr);
733ced8d73SChristophe Lombard 	return 0;
743ced8d73SChristophe Lombard }
753ced8d73SChristophe Lombard 
cxllib_get_xsl_config(struct pci_dev * dev,struct cxllib_xsl_config * cfg)763ced8d73SChristophe Lombard int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg)
773ced8d73SChristophe Lombard {
783ced8d73SChristophe Lombard 	int rc;
793ced8d73SChristophe Lombard 	u32 phb_index;
803ced8d73SChristophe Lombard 	u64 chip_id, capp_unit_id;
813ced8d73SChristophe Lombard 
823ced8d73SChristophe Lombard 	if (!cpu_has_feature(CPU_FTR_HVMODE))
833ced8d73SChristophe Lombard 		return -EINVAL;
843ced8d73SChristophe Lombard 
853ced8d73SChristophe Lombard 	mutex_lock(&dra_mutex);
863ced8d73SChristophe Lombard 	if (dummy_read_addr == CXL_INVALID_DRA) {
873ced8d73SChristophe Lombard 		rc = allocate_dummy_read_buf();
883ced8d73SChristophe Lombard 		if (rc) {
893ced8d73SChristophe Lombard 			mutex_unlock(&dra_mutex);
903ced8d73SChristophe Lombard 			return rc;
913ced8d73SChristophe Lombard 		}
923ced8d73SChristophe Lombard 	}
933ced8d73SChristophe Lombard 	mutex_unlock(&dra_mutex);
943ced8d73SChristophe Lombard 
953ced8d73SChristophe Lombard 	rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id);
963ced8d73SChristophe Lombard 	if (rc)
973ced8d73SChristophe Lombard 		return rc;
983ced8d73SChristophe Lombard 
999dbcbfa1SPhilippe Bergheaud 	rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &cfg->dsnctl);
1003ced8d73SChristophe Lombard 	if (rc)
1013ced8d73SChristophe Lombard 		return rc;
1023ced8d73SChristophe Lombard 
1033ced8d73SChristophe Lombard 	cfg->version  = CXL_XSL_CONFIG_CURRENT_VERSION;
1043ced8d73SChristophe Lombard 	cfg->log_bar_size = CXL_CAPI_WINDOW_LOG_SIZE;
1053ced8d73SChristophe Lombard 	cfg->bar_addr = CXL_CAPI_WINDOW_START;
1063ced8d73SChristophe Lombard 	cfg->dra = dummy_read_addr;
1073ced8d73SChristophe Lombard 	return 0;
1083ced8d73SChristophe Lombard }
1093ced8d73SChristophe Lombard EXPORT_SYMBOL_GPL(cxllib_get_xsl_config);
1103ced8d73SChristophe Lombard 
cxllib_switch_phb_mode(struct pci_dev * dev,enum cxllib_mode mode,unsigned long flags)1113ced8d73SChristophe Lombard int cxllib_switch_phb_mode(struct pci_dev *dev, enum cxllib_mode mode,
1123ced8d73SChristophe Lombard 			unsigned long flags)
1133ced8d73SChristophe Lombard {
1143ced8d73SChristophe Lombard 	int rc = 0;
1153ced8d73SChristophe Lombard 
1163ced8d73SChristophe Lombard 	if (!cpu_has_feature(CPU_FTR_HVMODE))
1173ced8d73SChristophe Lombard 		return -EINVAL;
1183ced8d73SChristophe Lombard 
1193ced8d73SChristophe Lombard 	switch (mode) {
1203ced8d73SChristophe Lombard 	case CXL_MODE_PCI:
1213ced8d73SChristophe Lombard 		/*
1223ced8d73SChristophe Lombard 		 * We currently don't support going back to PCI mode
1233ced8d73SChristophe Lombard 		 * However, we'll turn the invalidations off, so that
1243ced8d73SChristophe Lombard 		 * the firmware doesn't have to ack them and can do
1253ced8d73SChristophe Lombard 		 * things like reset, etc.. with no worries.
1263ced8d73SChristophe Lombard 		 * So always return EPERM (can't go back to PCI) or
1273ced8d73SChristophe Lombard 		 * EBUSY if we couldn't even turn off snooping
1283ced8d73SChristophe Lombard 		 */
1293ced8d73SChristophe Lombard 		rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_OFF);
1303ced8d73SChristophe Lombard 		if (rc)
1313ced8d73SChristophe Lombard 			rc = -EBUSY;
1323ced8d73SChristophe Lombard 		else
1333ced8d73SChristophe Lombard 			rc = -EPERM;
1343ced8d73SChristophe Lombard 		break;
1353ced8d73SChristophe Lombard 	case CXL_MODE_CXL:
1363ced8d73SChristophe Lombard 		/* DMA only supported on TVT1 for the time being */
1373ced8d73SChristophe Lombard 		if (flags != CXL_MODE_DMA_TVT1)
1383ced8d73SChristophe Lombard 			return -EINVAL;
1393ced8d73SChristophe Lombard 		rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_DMA_TVT1);
1403ced8d73SChristophe Lombard 		if (rc)
1413ced8d73SChristophe Lombard 			return rc;
1423ced8d73SChristophe Lombard 		rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON);
1433ced8d73SChristophe Lombard 		break;
1443ced8d73SChristophe Lombard 	default:
1453ced8d73SChristophe Lombard 		rc = -EINVAL;
1463ced8d73SChristophe Lombard 	}
1473ced8d73SChristophe Lombard 	return rc;
1483ced8d73SChristophe Lombard }
1493ced8d73SChristophe Lombard EXPORT_SYMBOL_GPL(cxllib_switch_phb_mode);
1503ced8d73SChristophe Lombard 
1513ced8d73SChristophe Lombard /*
1523ced8d73SChristophe Lombard  * When switching the PHB to capi mode, the TVT#1 entry for
1533ced8d73SChristophe Lombard  * the Partitionable Endpoint is set in bypass mode, like
1543ced8d73SChristophe Lombard  * in PCI mode.
1553ced8d73SChristophe Lombard  * Configure the device dma to use TVT#1, which is done
1563ced8d73SChristophe Lombard  * by calling dma_set_mask() with a mask large enough.
1573ced8d73SChristophe Lombard  */
cxllib_set_device_dma(struct pci_dev * dev,unsigned long flags)1583ced8d73SChristophe Lombard int cxllib_set_device_dma(struct pci_dev *dev, unsigned long flags)
1593ced8d73SChristophe Lombard {
1603ced8d73SChristophe Lombard 	int rc;
1613ced8d73SChristophe Lombard 
1623ced8d73SChristophe Lombard 	if (flags)
1633ced8d73SChristophe Lombard 		return -EINVAL;
1643ced8d73SChristophe Lombard 
1653ced8d73SChristophe Lombard 	rc = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
1663ced8d73SChristophe Lombard 	return rc;
1673ced8d73SChristophe Lombard }
1683ced8d73SChristophe Lombard EXPORT_SYMBOL_GPL(cxllib_set_device_dma);
1693ced8d73SChristophe Lombard 
cxllib_get_PE_attributes(struct task_struct * task,unsigned long translation_mode,struct cxllib_pe_attributes * attr)1703ced8d73SChristophe Lombard int cxllib_get_PE_attributes(struct task_struct *task,
1713ced8d73SChristophe Lombard 			     unsigned long translation_mode,
1723ced8d73SChristophe Lombard 			     struct cxllib_pe_attributes *attr)
1733ced8d73SChristophe Lombard {
1743ced8d73SChristophe Lombard 	if (translation_mode != CXL_TRANSLATED_MODE &&
1753ced8d73SChristophe Lombard 		translation_mode != CXL_REAL_MODE)
1763ced8d73SChristophe Lombard 		return -EINVAL;
1773ced8d73SChristophe Lombard 
1783ced8d73SChristophe Lombard 	attr->sr = cxl_calculate_sr(false,
1793ced8d73SChristophe Lombard 				task == NULL,
1803ced8d73SChristophe Lombard 				translation_mode == CXL_REAL_MODE,
1813ced8d73SChristophe Lombard 				true);
1823ced8d73SChristophe Lombard 	attr->lpid = mfspr(SPRN_LPID);
1833ced8d73SChristophe Lombard 	if (task) {
184245a389cSMarkus Elfring 		struct mm_struct *mm = get_task_mm(task);
1853ced8d73SChristophe Lombard 		if (mm == NULL)
1863ced8d73SChristophe Lombard 			return -EINVAL;
1873ced8d73SChristophe Lombard 		/*
1883ced8d73SChristophe Lombard 		 * Caller is keeping a reference on mm_users for as long
1893ced8d73SChristophe Lombard 		 * as XSL uses the memory context
1903ced8d73SChristophe Lombard 		 */
1913ced8d73SChristophe Lombard 		attr->pid = mm->context.id;
1923ced8d73SChristophe Lombard 		mmput(mm);
193b1db5513SChristophe Lombard 		attr->tid = task->thread.tidr;
1943ced8d73SChristophe Lombard 	} else {
1953ced8d73SChristophe Lombard 		attr->pid = 0;
1963ced8d73SChristophe Lombard 		attr->tid = 0;
197b1db5513SChristophe Lombard 	}
1983ced8d73SChristophe Lombard 	return 0;
1993ced8d73SChristophe Lombard }
2003ced8d73SChristophe Lombard EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes);
2013ced8d73SChristophe Lombard 
/*
 * Look up the VMA containing (or following) @addr and report its
 * bounds and kernel page size. Returns -EFAULT if no such VMA exists.
 */
static int get_vma_info(struct mm_struct *mm, u64 addr,
			u64 *vma_start, u64 *vma_end,
			unsigned long *page_size)
{
	struct vm_area_struct *vma;
	int rc = 0;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (vma) {
		*page_size = vma_kernel_pagesize(vma);
		*vma_start = vma->vm_start;
		*vma_end = vma->vm_end;
	} else {
		rc = -EFAULT;
	}
	mmap_read_unlock(mm);
	return rc;
}
223ad7b4e80SFrederic Barrat 
int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
{
	u64 dar, vma_start, vma_end;
	unsigned long page_size;
	int rc;

	if (mm == NULL)
		return -EFAULT;

	/*
	 * The buffer we have to process can extend over several pages
	 * and may also cover several VMAs.
	 * We iterate over all the pages. The page size could vary
	 * between VMAs.
	 */
	rc = get_vma_info(mm, addr, &vma_start, &vma_end, &page_size);
	if (rc)
		return rc;

	/* start at the page containing addr */
	dar = addr & ~(page_size - 1);
	while (dar < addr + size) {
		if (dar < vma_start || dar >= vma_end) {
			/*
			 * We don't hold mm->mmap_lock while iterating, since
			 * the lock is required by one of the lower-level page
			 * fault processing functions and it could
			 * create a deadlock.
			 *
			 * It means the VMAs can be altered between 2
			 * loop iterations and we could theoretically
			 * miss a page (however unlikely). But that's
			 * not really a problem, as the driver will
			 * retry access, get another page fault on the
			 * missing page and call us again.
			 */
			rc = get_vma_info(mm, dar, &vma_start, &vma_end,
					&page_size);
			if (rc)
				return rc;
		}

		rc = cxl_handle_mm_fault(mm, flags, dar);
		if (rc)
			return -EFAULT;

		/* page_size may have changed with the VMA above */
		dar += page_size;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(cxllib_handle_fault);
272