xref: /linux/drivers/dma/idxd/idxd.h (revision aa8d18becc0c14aa3eb46d6d1b81450446e11b87)
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/bitmap.h>
#include <linux/perf_event.h>
#include <linux/iommu.h>
#include <linux/crypto.h>
#include <uapi/linux/idxd.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;
extern bool tc_override;

struct idxd_wq;
struct idxd_dev;

enum idxd_dev_type {
	IDXD_DEV_NONE = -1,
	IDXD_DEV_DSA = 0,
	IDXD_DEV_IAX,
	IDXD_DEV_WQ,
	IDXD_DEV_GROUP,
	IDXD_DEV_ENGINE,
	IDXD_DEV_CDEV,
	IDXD_DEV_CDEV_FILE,
	IDXD_DEV_MAX_TYPE,
};

struct idxd_dev {
	struct device conf_dev;
	enum idxd_dev_type type;
};

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_IAX,
	IDXD_TYPE_MAX,
};

#define IDXD_NAME_SIZE		128
#define IDXD_PMU_EVENT_MAX	64

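/* Default and maximum ENQCMDS retry counts when submitting to a shared WQ. */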
#define IDXD_ENQCMDS_RETRIES		32
#define IDXD_ENQCMDS_MAX_RETRIES	64

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
	IDXD_COMPLETE_DEV_FAIL,
};

struct idxd_desc;

struct idxd_device_driver {
	const char *name;
	enum idxd_dev_type *type;
	int (*probe)(struct idxd_dev *idxd_dev);
	void (*remove)(struct idxd_dev *idxd_dev);
	void (*desc_complete)(struct idxd_desc *desc,
			      enum idxd_complete_type comp_type,
			      bool free_desc,
			      void *ctx, u32 *status);
	struct device_driver drv;
};

extern struct idxd_device_driver dsa_drv;
extern struct idxd_device_driver idxd_drv;
extern struct idxd_device_driver idxd_dmaengine_drv;
extern struct idxd_device_driver idxd_user_drv;

#define INVALID_INT_HANDLE	-1
struct idxd_irq_entry {
	int id;
	int vector;
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock to synchronize the irq thread processing completed
	 * descriptors with the irq thread processing error descriptors.
	 */
	spinlock_t list_lock;
	int int_handle;
	ioasid_t pasid;
};

struct idxd_group {
	struct idxd_dev idxd_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_rdbuf_limit;
	u8 rdbufs_allowed;
	u8 rdbufs_reserved;
	int tc_a;
	int tc_b;
	int desc_progress_limit;
	int batch_progress_limit;
};

struct idxd_pmu {
	struct idxd_device *idxd;

	struct perf_event *event_list[IDXD_PMU_EVENT_MAX];
	int n_events;

	DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX);

	struct pmu pmu;
	char name[IDXD_NAME_SIZE];
	int cpu;

	int n_counters;
	int counter_width;
	int n_event_categories;

	bool per_counter_caps_supported;
	unsigned long supported_event_categories;

	unsigned long supported_filters;
	int n_filters;

	struct hlist_node cpuhp_node;
};

#define IDXD_MAX_PRIORITY	0xf

enum {
	COUNTER_FAULTS = 0,
	COUNTER_FAULT_FAILS,
	COUNTER_MAX
};

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,
	WQ_FLAG_ATS_DISABLE,
	WQ_FLAG_PRS_DISABLE,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct idxd_wq *wq;
	struct cdev cdev;
	struct idxd_dev idxd_dev;
	int minor;
};

#define DRIVER_NAME_SIZE		128

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE   1024
#define WQ_TYPE_SIZE   10

#define WQ_DEFAULT_QUEUE_DEPTH		16
#define WQ_DEFAULT_MAX_XFER		SZ_2M
#define WQ_DEFAULT_MAX_BATCH		32

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

struct idxd_dma_chan {
	struct dma_chan chan;
	struct idxd_wq *wq;
};

struct idxd_wq {
	void __iomem *portal;
	u32 portal_offset;
	unsigned int enqcmds_retries;
	struct percpu_ref wq_active;
	struct completion wq_dead;
	struct completion wq_resurrect;
	struct idxd_dev idxd_dev;
	struct idxd_cdev *idxd_cdev;
	struct wait_queue_head err_queue;
	struct workqueue_struct *wq;
	struct idxd_device *idxd;
	int id;
	struct idxd_irq_entry ie;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	unsigned long *opcap_bmap;

	struct dsa_hw_desc **hw_descs;
	int num_descs;
	union {
		struct dsa_completion_record *compls;
		struct iax_completion_record *iax_compls;
	};
	dma_addr_t compls_addr;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct idxd_dma_chan *idxd_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;

	/* Lock to protect upasid_xa access. */
	struct mutex uc_lock;
	struct xarray upasid_xa;

	char driver_name[DRIVER_NAME_SIZE + 1];
};

struct idxd_engine {
	struct idxd_dev idxd_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
	u32 cmd_cap;
	union iaa_cap_reg iaa_cap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
	IDXD_FLAG_USER_PASID_ENABLED,
};

struct idxd_dma_dev {
	struct idxd_device *idxd;
	struct dma_device dma;
};

struct idxd_driver_data {
	const char *name_prefix;
	enum idxd_type type;
	struct device_type *dev_type;
	int compl_size;
	int align;
	int evl_cr_off;
	int cr_status_off;
	int cr_result_off;
};

struct idxd_evl {
	/* Lock to protect event log access. */
	spinlock_t lock;
	void *log;
	dma_addr_t dma;
	/* Total size of event log = number of entries * entry size. */
	unsigned int log_size;
	/* The number of entries in the event log. */
	u16 size;
	u16 head;
	unsigned long *bmap;
	bool batch_fail[IDXD_MAX_BATCH_IDENT];
};

struct idxd_evl_fault {
	struct work_struct work;
	struct idxd_wq *wq;
	u8 status;

	/* must always be the last member */
	struct __evl_entry entry[];
};

struct idxd_device {
	struct idxd_dev idxd_dev;
	struct idxd_driver_data *data;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u32 cmd_status;
	struct idxd_irq_entry ie;	/* misc irq, msix 0 */

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	spinlock_t cmd_lock;	/* spinlock for device commands */
	struct completion *cmd_done;
	struct idxd_group **groups;
	struct idxd_wq **wqs;
	struct idxd_engine **engines;

	struct iommu_sva *sva;
	unsigned int pasid;

	int num_groups;
	int irq_cnt;
	bool request_int_handles;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_rdbufs;
	int max_wqs;
	int max_wq_size;
	int rdbuf_limit;
	int nr_rdbufs;		/* non-reserved read buffers */
	unsigned int wqcfg_size;
	unsigned long *wq_enable_map;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;

	struct idxd_dma_dev *idxd_dma;
	struct workqueue_struct *wq;
	struct work_struct work;

	struct idxd_pmu *idxd_pmu;

	unsigned long *opcap_bmap;
	struct idxd_evl *evl;
	struct kmem_cache *evl_cache;

	struct dentry *dbgfs_dir;
	struct dentry *dbgfs_evl_file;
};

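/*
 * Event log entry size in bytes: 32 * 2^evl_support when the GENCAP
 * evl_support field is nonzero, or 0 when the event log is unsupported.
 */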
static inline unsigned int evl_ent_size(struct idxd_device *idxd)
{
	return idxd->hw.gen_cap.evl_support ?
	       (32 * (1 << idxd->hw.gen_cap.evl_support)) : 0;
}

static inline unsigned int evl_size(struct idxd_device *idxd)
{
	return idxd->evl->size * evl_ent_size(idxd);
}

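/*
 * Per-request context for crypto clients (e.g. IAA compression);
 * overlays the dmaengine txd in struct idxd_desc below.
 */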
struct crypto_ctx {
	struct acomp_req *req;
	struct crypto_tfm *tfm;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	bool compress;
};

/* IDXD software descriptor */
struct idxd_desc {
	union {
		struct dsa_hw_desc *hw;
		struct iax_hw_desc *iax_hw;
	};
	dma_addr_t desc_dma;
	union {
		struct dsa_completion_record *completion;
		struct iax_completion_record *iax_completion;
	};
	dma_addr_t compl_dma;
	union {
		struct dma_async_tx_descriptor txd;
		struct crypto_ctx crypto;
	};
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	struct idxd_wq *wq;
};

/*
 * Software-defined error for the completion status. We overload an error
 * code that will never appear in the completion status, only in the SWERR
 * register.
 */
enum idxd_completion_status {
	IDXD_COMP_DESC_ABORT = 0xff,
};

#define idxd_confdev(idxd) (&(idxd)->idxd_dev.conf_dev)
#define wq_confdev(wq) (&(wq)->idxd_dev.conf_dev)
#define engine_confdev(engine) (&(engine)->idxd_dev.conf_dev)
#define group_confdev(group) (&(group)->idxd_dev.conf_dev)
#define cdev_dev(cdev) (&(cdev)->idxd_dev.conf_dev)
#define user_ctx_dev(ctx) (&(ctx)->idxd_dev.conf_dev)

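/*
 * Helpers mapping the embedded conf_dev / idxd_dev back to their
 * containing objects.
 */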
#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)

static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq)
{
	struct device *dev = wq_confdev(wq);
	struct idxd_device_driver *idxd_drv =
		container_of(dev->driver, struct idxd_device_driver, drv);

	return idxd_drv;
}

static inline struct idxd_device *confdev_to_idxd(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_idxd(idxd_dev);
}

static inline struct idxd_wq *confdev_to_wq(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_wq(idxd_dev);
}

static inline struct idxd_engine *confdev_to_engine(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_engine, idxd_dev);
}

static inline struct idxd_group *confdev_to_group(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_group, idxd_dev);
}

static inline struct idxd_cdev *dev_to_cdev(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_cdev, idxd_dev);
}

static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
{
	if (type >= IDXD_DEV_MAX_TYPE) {
		idev->type = IDXD_DEV_NONE;
		return;
	}

	idev->type = type;
}

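/*
 * MSI-X vector 0 is the device misc interrupt entry; vectors 1..N map
 * to the per-WQ interrupt entries.
 */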
static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx)
{
	return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie;
}

static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie)
{
	return container_of(ie, struct idxd_wq, ie);
}

static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
{
	return container_of(ie, struct idxd_device, ie);
}

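/* Read-modify-write of GENCFG to toggle the user interrupt enable bit. */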
static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
{
	union gencfg_reg reg;

	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	reg.user_int_en = enable;
	iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
}

extern struct bus_type dsa_bus_type;

extern bool support_enqcmd;
extern struct ida idxd_ida;
extern struct device_type dsa_device_type;
extern struct device_type iax_device_type;
extern struct device_type idxd_wq_device_type;
extern struct device_type idxd_engine_device_type;
extern struct device_type idxd_group_device_type;

static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_DSA;
}

static inline bool is_iax_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_IAX;
}

static inline bool is_idxd_dev(struct idxd_dev *idxd_dev)
{
	return is_dsa_dev(idxd_dev) || is_iax_dev(idxd_dev);
}

static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_WQ;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_user(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_KERNEL;
}

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_user_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
}

static inline bool wq_pasid_enabled(struct idxd_wq *wq)
{
	return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) ||
	       (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd));
}

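/* Shared WQs require ENQCMD/ENQCMDS CPU support and a PASID-enabled WQ. */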
static inline bool wq_shared_supported(struct idxd_wq *wq)
{
	return (support_enqcmd && wq_pasid_enabled(wq));
}

enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

enum idxd_interrupt_type {
	IDXD_IRQ_MSIX = 0,
	IDXD_IRQ_IMS,
};

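/*
 * Each WQ owns a stride of four pages in the portal BAR; with 4KB pages,
 * the unlimited portal sits at offset 0 of that stride and the limited
 * portal at offset 0x1000.
 */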
static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}

#define IDXD_PORTAL_MASK	(PAGE_SIZE - 1)

/*
 * Even though this function can be accessed by multiple threads, it is safe
 * to use. At worst the address gets used more than once before it gets
 * incremented. We don't hit a threshold until iops reaches many millions per
 * second, so the occasional reuse of the same address is tolerable compared
 * to using an atomic variable. This is safe on a system that has atomic
 * load/store for 32bit integers. Given that this is an Intel iEP device,
 * that should not be a problem.
 */
static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq)
{
	int ofs = wq->portal_offset;

	wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) & IDXD_PORTAL_MASK;
	return wq->portal + ofs;
}

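/*
 * Client counting is not atomic; callers are expected to serialize,
 * e.g. by holding wq->wq_lock.
 */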
static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}

static inline void idxd_wq_set_private(struct idxd_wq *wq, void *private)
{
	dev_set_drvdata(wq_confdev(wq), private);
}

static inline void *idxd_wq_get_private(struct idxd_wq *wq)
{
	return dev_get_drvdata(wq_confdev(wq));
}

/*
 * Intel IAA does not support batch processing.
 * The device's max batch size, the WQ's max batch size, and the
 * WQCFG max batch shift should always be 0 on IAA.
 */
static inline void idxd_set_max_batch_size(int idxd_type, struct idxd_device *idxd,
					   u32 max_batch_size)
{
	if (idxd_type == IDXD_TYPE_IAX)
		idxd->max_batch_size = 0;
	else
		idxd->max_batch_size = max_batch_size;
}

static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq,
					      u32 max_batch_size)
{
	if (idxd_type == IDXD_TYPE_IAX)
		wq->max_batch_size = 0;
	else
		wq->max_batch_size = max_batch_size;
}

static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg,
						  u32 max_batch_shift)
{
	if (idxd_type == IDXD_TYPE_IAX)
		wqcfg->max_batch_shift = 0;
	else
		wqcfg->max_batch_shift = max_batch_shift;
}

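/*
 * Note: this is a prefix match; wq->driver_name only needs to begin
 * with the bound driver's name.
 */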
static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev)
{
	return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0);
}

#define MODULE_ALIAS_IDXD_DEVICE(type) MODULE_ALIAS("idxd:t" __stringify(type) "*")
#define IDXD_DEVICES_MODALIAS_FMT "idxd:t%d"

int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
					struct module *module, const char *mod_name);
#define idxd_driver_register(driver) \
	__idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)

void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);

#define module_idxd_driver(__idxd_driver) \
	module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)

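/*
 * A minimal sketch of how a sub-driver registers on the dsa bus; the
 * names my_wq_drv/my_probe/my_remove are hypothetical, while the real
 * sub-drivers are idxd_drv, idxd_dmaengine_drv and idxd_user_drv.
 * The type array is terminated by IDXD_DEV_NONE:
 *
 *	static enum idxd_dev_type my_dev_types[] = {
 *		IDXD_DEV_WQ,
 *		IDXD_DEV_NONE,
 *	};
 *
 *	static struct idxd_device_driver my_wq_drv = {
 *		.name = "my_wq_drv",
 *		.type = my_dev_types,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *	module_idxd_driver(my_wq_drv);
 */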
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type,
			   bool free_desc, void *ctx, u32 *status);

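/*
 * Route a completed descriptor to the desc_complete() hook of the
 * sub-driver bound to its WQ; the dmaengine txd is passed as context.
 */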
static inline void idxd_desc_complete(struct idxd_desc *desc,
				      enum idxd_complete_type comp_type,
				      bool free_desc)
{
	struct idxd_device_driver *drv;
	u32 status;

	drv = wq_to_idxd_drv(desc->wq);
	if (drv->desc_complete)
		drv->desc_complete(desc, comp_type, free_desc,
				   &desc->txd, &status);
}

int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);

/* device interrupt control */
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);

/* device control */
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int idxd_drv_enable_wq(struct idxd_wq *wq);
void idxd_drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_clear_state(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type);
int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type);

/* work queue control */
void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq, bool reset_config);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void __idxd_wq_quiesce(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
void idxd_wq_free_irq(struct idxd_wq *wq);
int idxd_wq_request_irq(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);

/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);
int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
		 void *buf, int len);
void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index);

/* perfmon */
#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
int perfmon_pmu_init(struct idxd_device *idxd);
void perfmon_pmu_remove(struct idxd_device *idxd);
void perfmon_counter_overflow(struct idxd_device *idxd);
void perfmon_init(void);
void perfmon_exit(void);
#else
static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif

/* debugfs */
int idxd_device_init_debugfs(struct idxd_device *idxd);
void idxd_device_remove_debugfs(struct idxd_device *idxd);
int idxd_init_debugfs(void);
void idxd_remove_debugfs(void);

#endif