xref: /linux/drivers/accel/amdxdna/amdxdna_pci_drv.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
4  */
5 
6 #include <drm/amdxdna_accel.h>
7 #include <drm/drm_accel.h>
8 #include <drm/drm_drv.h>
9 #include <drm/drm_gem.h>
10 #include <drm/drm_gem_shmem_helper.h>
11 #include <drm/drm_ioctl.h>
12 #include <drm/drm_managed.h>
13 #include <drm/gpu_scheduler.h>
14 #include <linux/iommu.h>
15 #include <linux/pci.h>
16 
17 #include "amdxdna_ctx.h"
18 #include "amdxdna_gem.h"
19 #include "amdxdna_pci_drv.h"
20 #include "amdxdna_pm.h"
21 
22 MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
23 MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
24 MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
25 MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
26 
27 /*
28  * 0.0: Initial version
29  * 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY
 * 0.2: Support getting the last hardware error
31  * 0.3: Support firmware debug buffer
32  * 0.4: Support getting resource information
33  * 0.5: Support getting telemetry data
34  */
35 #define AMDXDNA_DRIVER_MAJOR		0
36 #define AMDXDNA_DRIVER_MINOR		5
37 
38 /*
 * Bind the driver based on the (vendor_id, device_id) pair and later use the
40  * (device_id, rev_id) pair as a key to select the devices. The devices with
41  * same device_id have very similar interface to host driver.
42  */
/* PCI IDs the driver binds to; the NPU variant is resolved later via amdxdna_ids */
static const struct pci_device_id pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1502) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x17f0) },
	{0}
};
48 
49 MODULE_DEVICE_TABLE(pci, pci_ids);
50 
/* (device_id, rev_id) -> per-variant device info; the all-zero entry terminates the table */
static const struct amdxdna_device_id amdxdna_ids[] = {
	{ 0x1502, 0x0,  &dev_npu1_info },
	{ 0x17f0, 0x0,  &dev_npu2_info },
	{ 0x17f0, 0x10, &dev_npu4_info },
	{ 0x17f0, 0x11, &dev_npu5_info },
	{ 0x17f0, 0x20, &dev_npu6_info },
	{0}
};
59 
60 static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
61 {
62 	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
63 	struct amdxdna_client *client;
64 	int ret;
65 
66 	client = kzalloc(sizeof(*client), GFP_KERNEL);
67 	if (!client)
68 		return -ENOMEM;
69 
70 	client->pid = pid_nr(rcu_access_pointer(filp->pid));
71 	client->xdna = xdna;
72 
73 	client->sva = iommu_sva_bind_device(xdna->ddev.dev, current->mm);
74 	if (IS_ERR(client->sva)) {
75 		ret = PTR_ERR(client->sva);
76 		XDNA_ERR(xdna, "SVA bind device failed, ret %d", ret);
77 		goto failed;
78 	}
79 	client->pasid = iommu_sva_get_pasid(client->sva);
80 	if (client->pasid == IOMMU_PASID_INVALID) {
81 		XDNA_ERR(xdna, "SVA get pasid failed");
82 		ret = -ENODEV;
83 		goto unbind_sva;
84 	}
85 	init_srcu_struct(&client->hwctx_srcu);
86 	xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
87 	mutex_init(&client->mm_lock);
88 
89 	mutex_lock(&xdna->dev_lock);
90 	list_add_tail(&client->node, &xdna->client_list);
91 	mutex_unlock(&xdna->dev_lock);
92 
93 	filp->driver_priv = client;
94 	client->filp = filp;
95 
96 	XDNA_DBG(xdna, "pid %d opened", client->pid);
97 	return 0;
98 
99 unbind_sva:
100 	iommu_sva_unbind_device(client->sva);
101 failed:
102 	kfree(client);
103 
104 	return ret;
105 }
106 
107 static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
108 {
109 	struct amdxdna_client *client = filp->driver_priv;
110 	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
111 
112 	XDNA_DBG(xdna, "closing pid %d", client->pid);
113 
114 	xa_destroy(&client->hwctx_xa);
115 	cleanup_srcu_struct(&client->hwctx_srcu);
116 	mutex_destroy(&client->mm_lock);
117 	if (client->dev_heap)
118 		drm_gem_object_put(to_gobj(client->dev_heap));
119 
120 	iommu_sva_unbind_device(client->sva);
121 
122 	XDNA_DBG(xdna, "pid %d closed", client->pid);
123 	kfree(client);
124 }
125 
126 static int amdxdna_flush(struct file *f, fl_owner_t id)
127 {
128 	struct drm_file *filp = f->private_data;
129 	struct amdxdna_client *client = filp->driver_priv;
130 	struct amdxdna_dev *xdna = client->xdna;
131 	int idx;
132 
133 	XDNA_DBG(xdna, "PID %d flushing...", client->pid);
134 	if (!drm_dev_enter(&xdna->ddev, &idx))
135 		return 0;
136 
137 	mutex_lock(&xdna->dev_lock);
138 	list_del_init(&client->node);
139 	amdxdna_hwctx_remove_all(client);
140 	mutex_unlock(&xdna->dev_lock);
141 
142 	drm_dev_exit(idx);
143 	return 0;
144 }
145 
146 static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
147 {
148 	struct amdxdna_client *client = filp->driver_priv;
149 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
150 	struct amdxdna_drm_get_info *args = data;
151 	int ret;
152 
153 	if (!xdna->dev_info->ops->get_aie_info)
154 		return -EOPNOTSUPP;
155 
156 	XDNA_DBG(xdna, "Request parameter %u", args->param);
157 	mutex_lock(&xdna->dev_lock);
158 	ret = xdna->dev_info->ops->get_aie_info(client, args);
159 	mutex_unlock(&xdna->dev_lock);
160 	return ret;
161 }
162 
163 static int amdxdna_drm_get_array_ioctl(struct drm_device *dev, void *data,
164 				       struct drm_file *filp)
165 {
166 	struct amdxdna_client *client = filp->driver_priv;
167 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
168 	struct amdxdna_drm_get_array *args = data;
169 
170 	if (!xdna->dev_info->ops->get_array)
171 		return -EOPNOTSUPP;
172 
173 	if (args->pad || !args->num_element || !args->element_size)
174 		return -EINVAL;
175 
176 	guard(mutex)(&xdna->dev_lock);
177 	return xdna->dev_info->ops->get_array(client, args);
178 }
179 
180 static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
181 {
182 	struct amdxdna_client *client = filp->driver_priv;
183 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
184 	struct amdxdna_drm_set_state *args = data;
185 	int ret;
186 
187 	if (!xdna->dev_info->ops->set_aie_state)
188 		return -EOPNOTSUPP;
189 
190 	XDNA_DBG(xdna, "Request parameter %u", args->param);
191 	mutex_lock(&xdna->dev_lock);
192 	ret = xdna->dev_info->ops->set_aie_state(client, args);
193 	mutex_unlock(&xdna->dev_lock);
194 
195 	return ret;
196 }
197 
/* Ioctl table; flags 0 means no extra permission bits, SET_STATE is root-only */
static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
	/* Context */
	DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_HWCTX, amdxdna_drm_create_hwctx_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_DESTROY_HWCTX, amdxdna_drm_destroy_hwctx_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_CONFIG_HWCTX, amdxdna_drm_config_hwctx_ioctl, 0),
	/* BO */
	DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_BO, amdxdna_drm_create_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_BO_INFO, amdxdna_drm_get_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_SYNC_BO, amdxdna_drm_sync_bo_ioctl, 0),
	/* Execution */
	DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
	/* AIE hardware */
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_GET_ARRAY, amdxdna_drm_get_array_ioctl, 0),
	DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
};
214 
/*
 * File operations for the accel device node. Most entry points are the
 * generic DRM helpers; only .flush is driver-specific (see amdxdna_flush()).
 */
static const struct file_operations amdxdna_fops = {
	.owner		= THIS_MODULE,
	.open		= accel_open,
	.release	= drm_release,
	.flush		= amdxdna_flush,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,
	.fop_flags	= FOP_UNSIGNED_OFFSET,
};
228 
/* DRM driver description registered for the compute accelerator node */
const struct drm_driver amdxdna_drm_drv = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL |
		DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
	.fops = &amdxdna_fops,
	.name = "amdxdna_accel_driver",
	.desc = "AMD XDNA DRM implementation",
	.major = AMDXDNA_DRIVER_MAJOR,
	.minor = AMDXDNA_DRIVER_MINOR,
	.open = amdxdna_drm_open,
	.postclose = amdxdna_drm_close,
	.ioctls = amdxdna_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),

	/* GEM object creation and dma-buf (prime) import hooks */
	.gem_create_object = amdxdna_gem_create_object_cb,
	.gem_prime_import = amdxdna_gem_prime_import,
};
245 
246 static const struct amdxdna_dev_info *
247 amdxdna_get_dev_info(struct pci_dev *pdev)
248 {
249 	int i;
250 
251 	for (i = 0; i < ARRAY_SIZE(amdxdna_ids); i++) {
252 		if (pdev->device == amdxdna_ids[i].device &&
253 		    pdev->revision == amdxdna_ids[i].revision)
254 			return amdxdna_ids[i].dev_info;
255 	}
256 	return NULL;
257 }
258 
259 static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
260 {
261 	struct device *dev = &pdev->dev;
262 	struct amdxdna_dev *xdna;
263 	int ret;
264 
265 	xdna = devm_drm_dev_alloc(dev, &amdxdna_drm_drv, typeof(*xdna), ddev);
266 	if (IS_ERR(xdna))
267 		return PTR_ERR(xdna);
268 
269 	xdna->dev_info = amdxdna_get_dev_info(pdev);
270 	if (!xdna->dev_info)
271 		return -ENODEV;
272 
273 	drmm_mutex_init(&xdna->ddev, &xdna->dev_lock);
274 	init_rwsem(&xdna->notifier_lock);
275 	INIT_LIST_HEAD(&xdna->client_list);
276 	pci_set_drvdata(pdev, xdna);
277 
278 	if (IS_ENABLED(CONFIG_LOCKDEP)) {
279 		fs_reclaim_acquire(GFP_KERNEL);
280 		might_lock(&xdna->notifier_lock);
281 		fs_reclaim_release(GFP_KERNEL);
282 	}
283 
284 	xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", 0);
285 	if (!xdna->notifier_wq)
286 		return -ENOMEM;
287 
288 	mutex_lock(&xdna->dev_lock);
289 	ret = xdna->dev_info->ops->init(xdna);
290 	mutex_unlock(&xdna->dev_lock);
291 	if (ret) {
292 		XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
293 		goto destroy_notifier_wq;
294 	}
295 
296 	ret = amdxdna_sysfs_init(xdna);
297 	if (ret) {
298 		XDNA_ERR(xdna, "Create amdxdna attrs failed: %d", ret);
299 		goto failed_dev_fini;
300 	}
301 
302 	ret = drm_dev_register(&xdna->ddev, 0);
303 	if (ret) {
304 		XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
305 		goto failed_sysfs_fini;
306 	}
307 
308 	return 0;
309 
310 failed_sysfs_fini:
311 	amdxdna_sysfs_fini(xdna);
312 failed_dev_fini:
313 	mutex_lock(&xdna->dev_lock);
314 	xdna->dev_info->ops->fini(xdna);
315 	mutex_unlock(&xdna->dev_lock);
316 destroy_notifier_wq:
317 	destroy_workqueue(xdna->notifier_wq);
318 	return ret;
319 }
320 
/*
 * PCI remove: unwind amdxdna_probe() and drain any clients that still
 * have the device open. The teardown order here is deliberate — do not
 * reorder without understanding each step.
 */
static void amdxdna_remove(struct pci_dev *pdev)
{
	struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
	struct amdxdna_client *client;

	/*
	 * NOTE(review): the notifier workqueue is destroyed before the DRM
	 * device is unplugged — presumably to guarantee no notifier work
	 * runs during the teardown below; confirm before changing.
	 */
	destroy_workqueue(xdna->notifier_wq);

	drm_dev_unplug(&xdna->ddev);
	amdxdna_sysfs_fini(xdna);

	/* Detach every remaining client and remove its hardware contexts */
	mutex_lock(&xdna->dev_lock);
	client = list_first_entry_or_null(&xdna->client_list,
					  struct amdxdna_client, node);
	while (client) {
		list_del_init(&client->node);
		amdxdna_hwctx_remove_all(client);

		/* Re-fetch the head; the list shrinks as clients are removed */
		client = list_first_entry_or_null(&xdna->client_list,
						  struct amdxdna_client, node);
	}

	/* Shut the hardware down last, still under dev_lock */
	xdna->dev_info->ops->fini(xdna);
	mutex_unlock(&xdna->dev_lock);
}
345 
/* The same suspend/resume callbacks serve system sleep and runtime PM */
static const struct dev_pm_ops amdxdna_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume)
	RUNTIME_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume, NULL)
};
350 
/* PCI driver glue; registered by module_pci_driver() below */
static struct pci_driver amdxdna_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pci_ids,
	.probe = amdxdna_probe,
	.remove = amdxdna_remove,
	.driver.pm = &amdxdna_pm_ops,
};
358 
359 module_pci_driver(amdxdna_pci_driver);
360 
361 MODULE_LICENSE("GPL");
362 MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
363 MODULE_DESCRIPTION("amdxdna driver");
364