/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/bitmap.h>
#include <linux/perf_event.h>
#include <linux/iommu.h>
#include <linux/crypto.h>
#include <uapi/linux/idxd.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION "1.00"

extern bool tc_override;

struct idxd_wq;
struct idxd_dev;

enum idxd_dev_type {
	IDXD_DEV_NONE = -1,
	IDXD_DEV_DSA = 0,
	IDXD_DEV_IAX,
	IDXD_DEV_WQ,
	IDXD_DEV_GROUP,
	IDXD_DEV_ENGINE,
	IDXD_DEV_CDEV,
	IDXD_DEV_CDEV_FILE,
	IDXD_DEV_MAX_TYPE,
};

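/*
 * struct idxd_dev pairs the sysfs 'conf_dev' with its object type. It is
 * embedded in every configurable object (device, wq, group, engine, cdev),
 * which lets the confdev_to_*() helpers below recover the containing object
 * from a bare struct device via container_of().
 */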
struct idxd_dev {
	struct device conf_dev;
	enum idxd_dev_type type;
};

#define IDXD_REG_TIMEOUT 50
#define IDXD_DRAIN_TIMEOUT 5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_IAX,
	IDXD_TYPE_MAX,
};

#define IDXD_NAME_SIZE 128
#define IDXD_PMU_EVENT_MAX 64

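/*
 * Default and upper bound for the number of ENQCMDS retries when submitting
 * a descriptor to a shared WQ; the per-wq value (wq->enqcmds_retries) is
 * tunable but capped at the maximum.
 */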
#define IDXD_ENQCMDS_RETRIES 32
#define IDXD_ENQCMDS_MAX_RETRIES 64

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
	IDXD_COMPLETE_DEV_FAIL,
};

struct idxd_desc;

struct idxd_device_driver {
	const char *name;
	enum idxd_dev_type *type;
	int (*probe)(struct idxd_dev *idxd_dev);
	void (*remove)(struct idxd_dev *idxd_dev);
	void (*desc_complete)(struct idxd_desc *desc,
			      enum idxd_complete_type comp_type,
			      bool free_desc,
			      void *ctx, u32 *status);
	struct device_driver drv;
};

extern struct idxd_device_driver dsa_drv;
extern struct idxd_device_driver idxd_drv;
extern struct idxd_device_driver idxd_dmaengine_drv;
extern struct idxd_device_driver idxd_user_drv;

#define INVALID_INT_HANDLE -1
struct idxd_irq_entry {
	int id;
	int vector;
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock to synchronize access between the irq thread processing
	 * completed descriptors and the irq thread processing error
	 * descriptors.
	 */
	spinlock_t list_lock;
	int int_handle;
	ioasid_t pasid;
};

struct idxd_group {
	struct idxd_dev idxd_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_rdbuf_limit;
	u8 rdbufs_allowed;
	u8 rdbufs_reserved;
	int tc_a;
	int tc_b;
	int desc_progress_limit;
	int batch_progress_limit;
};

struct idxd_pmu {
	struct idxd_device *idxd;

	struct perf_event *event_list[IDXD_PMU_EVENT_MAX];
	int n_events;

	DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX);

	struct pmu pmu;
	char name[IDXD_NAME_SIZE];

	int n_counters;
	int counter_width;
	int n_event_categories;

	bool per_counter_caps_supported;
	unsigned long supported_event_categories;

	unsigned long supported_filters;
	int n_filters;
};

#define IDXD_MAX_PRIORITY 0xf

enum {
	COUNTER_FAULTS = 0,
	COUNTER_FAULT_FAILS,
	COUNTER_MAX
};

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,
	WQ_FLAG_ATS_DISABLE,
	WQ_FLAG_PRS_DISABLE,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct idxd_wq *wq;
	struct cdev cdev;
	struct idxd_dev idxd_dev;
	int minor;
};

#define DRIVER_NAME_SIZE 128

#define WQ_NAME_SIZE 1024
#define WQ_TYPE_SIZE 10

#define WQ_DEFAULT_QUEUE_DEPTH 16
#define WQ_DEFAULT_MAX_XFER SZ_2M
#define WQ_DEFAULT_MAX_BATCH 32

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

struct idxd_dma_chan {
	struct dma_chan chan;
	struct idxd_wq *wq;
};

struct idxd_wq {
	void __iomem *portal;
	u32 portal_offset;
	unsigned int enqcmds_retries;
	struct percpu_ref wq_active;
	struct completion wq_dead;
	struct completion wq_resurrect;
	struct idxd_dev idxd_dev;
	struct idxd_cdev *idxd_cdev;
	struct wait_queue_head err_queue;
	struct workqueue_struct *wq;
	struct idxd_device *idxd;
	int id;
	struct idxd_irq_entry ie;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	unsigned long *opcap_bmap;

	struct dsa_hw_desc **hw_descs;
	int num_descs;
	union {
		struct dsa_completion_record *compls;
		struct iax_completion_record *iax_compls;
	};
	dma_addr_t compls_addr;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct idxd_dma_chan *idxd_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
	u32 max_sgl_size;

	/* Lock to protect upasid_xa access. */
	struct mutex uc_lock;
	struct xarray upasid_xa;

	char driver_name[DRIVER_NAME_SIZE + 1];
};

struct idxd_engine {
	struct idxd_dev idxd_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
	u32 cmd_cap;
	union iaa_cap_reg iaa_cap;
	union dsacap0_reg dsacap0;
	union dsacap1_reg dsacap1;
	union dsacap2_reg dsacap2;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
	IDXD_FLAG_USER_PASID_ENABLED,
};

struct idxd_dma_dev {
	struct idxd_device *idxd;
	struct dma_device dma;
};

typedef int (*load_device_defaults_fn_t) (struct idxd_device *idxd);

struct idxd_driver_data {
	const char *name_prefix;
	enum idxd_type type;
	const struct device_type *dev_type;
	int compl_size;
	int align;
	int evl_cr_off;
	int cr_status_off;
	int cr_result_off;
	bool user_submission_safe;
	load_device_defaults_fn_t load_device_defaults;
};

struct idxd_evl {
	/* Lock to protect event log access. */
	struct mutex lock;
	void *log;
	dma_addr_t dma;
	/* Total size of event log = number of entries * entry size. */
	unsigned int log_size;
	/* The number of entries in the event log. */
	u16 size;
	unsigned long *bmap;
	bool batch_fail[IDXD_MAX_BATCH_IDENT];
};

struct idxd_evl_fault {
	struct work_struct work;
	struct idxd_wq *wq;
	u8 status;

	/* make this last member always */
	struct __evl_entry entry[];
};

struct idxd_device {
	struct idxd_dev idxd_dev;
	struct idxd_driver_data *data;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u32 cmd_status;
	struct idxd_irq_entry ie;	/* misc irq, msix 0 */

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	spinlock_t cmd_lock;	/* spinlock for device commands */
	struct completion *cmd_done;
	struct idxd_group **groups;
	struct idxd_wq **wqs;
	struct idxd_engine **engines;

	struct iommu_sva *sva;
	unsigned int pasid;

	int num_groups;
	int irq_cnt;
	bool request_int_handles;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	u32 max_sgl_size;
	int max_groups;
	int max_engines;
	int max_rdbufs;
	int max_wqs;
	int max_wq_size;
	int rdbuf_limit;
	int nr_rdbufs;		/* non-reserved read buffers */
	unsigned int wqcfg_size;
	unsigned long *wq_enable_map;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;

	struct idxd_dma_dev *idxd_dma;
	struct workqueue_struct *wq;
	struct work_struct work;

	struct idxd_pmu *idxd_pmu;

	unsigned long *opcap_bmap;
	struct idxd_evl *evl;
	struct kmem_cache *evl_cache;

	struct dentry *dbgfs_dir;
	struct dentry *dbgfs_evl_file;

	bool user_submission_safe;

	struct idxd_saved_states *idxd_saved;
};

struct idxd_saved_states {
	struct idxd_device saved_idxd;
	struct idxd_evl saved_evl;
	struct idxd_engine **saved_engines;
	struct idxd_wq **saved_wqs;
	struct idxd_group **saved_groups;
	unsigned long *saved_wq_enable_map;
};

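/*
 * Event log entry size is encoded in GENCAP.evl_support: 0 means the device
 * has no event log; otherwise each entry occupies 32 * (1 << evl_support)
 * bytes.
 */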
static inline unsigned int evl_ent_size(struct idxd_device *idxd)
{
	return idxd->hw.gen_cap.evl_support ?
	       (32 * (1 << idxd->hw.gen_cap.evl_support)) : 0;
}

static inline unsigned int evl_size(struct idxd_device *idxd)
{
	return idxd->evl->size * evl_ent_size(idxd);
}

struct crypto_ctx {
	struct acomp_req *req;
	struct crypto_tfm *tfm;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	bool compress;
};

/* IDXD software descriptor */
struct idxd_desc {
	union {
		struct dsa_hw_desc *hw;
		struct iax_hw_desc *iax_hw;
	};
	dma_addr_t desc_dma;
	union {
		struct dsa_completion_record *completion;
		struct iax_completion_record *iax_completion;
	};
	dma_addr_t compl_dma;
	union {
		struct dma_async_tx_descriptor txd;
		struct crypto_ctx crypto;
	};
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	struct idxd_wq *wq;
};

/*
 * This is a software defined error for the completion status. We overload
 * an error code that will never appear in the completion status and is only
 * used in the SWERR register.
 */
enum idxd_completion_status {
	IDXD_COMP_DESC_ABORT = 0xff,
};

#define idxd_confdev(idxd) &idxd->idxd_dev.conf_dev
#define wq_confdev(wq) &wq->idxd_dev.conf_dev
#define engine_confdev(engine) &engine->idxd_dev.conf_dev
#define group_confdev(group) &group->idxd_dev.conf_dev
#define cdev_dev(cdev) &cdev->idxd_dev.conf_dev
#define user_ctx_dev(ctx) (&(ctx)->idxd_dev.conf_dev)

#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)

static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq)
{
	struct device *dev = wq_confdev(wq);
	struct idxd_device_driver *idxd_drv =
		container_of(dev->driver, struct idxd_device_driver, drv);

	return idxd_drv;
}

static inline struct idxd_device *confdev_to_idxd(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_idxd(idxd_dev);
}

static inline struct idxd_wq *confdev_to_wq(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_wq(idxd_dev);
}

static inline struct idxd_engine *confdev_to_engine(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_engine, idxd_dev);
}

static inline struct idxd_group *confdev_to_group(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_group, idxd_dev);
}

static inline struct idxd_cdev *dev_to_cdev(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_cdev, idxd_dev);
}

static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
{
	if (type >= IDXD_DEV_MAX_TYPE) {
		idev->type = IDXD_DEV_NONE;
		return;
	}

	idev->type = type;
}

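/*
 * MSI-X vector 0 is the device misc interrupt entry (idxd->ie); vector i
 * (i >= 1) belongs to the interrupt entry embedded in wq[i - 1].
 */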
static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx)
{
	return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie;
}

static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie)
{
	return container_of(ie, struct idxd_wq, ie);
}

static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
{
	return container_of(ie, struct idxd_device, ie);
}

static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
{
	union gencfg_reg reg;

	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	reg.user_int_en = enable;
	iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
}

extern const struct bus_type dsa_bus_type;

extern bool support_enqcmd;
extern struct ida idxd_ida;
extern const struct device_type dsa_device_type;
extern const struct device_type iax_device_type;
extern const struct device_type idxd_wq_device_type;
extern const struct device_type idxd_engine_device_type;
extern const struct device_type idxd_group_device_type;

static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_DSA;
}

static inline bool is_iax_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_IAX;
}

static inline bool is_idxd_dev(struct idxd_dev *idxd_dev)
{
	return is_dsa_dev(idxd_dev) || is_iax_dev(idxd_dev);
}

static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_WQ;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_user(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_KERNEL;
}

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_user_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
}

static inline bool wq_pasid_enabled(struct idxd_wq *wq)
{
	return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) ||
	       (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd));
}

static inline bool wq_shared_supported(struct idxd_wq *wq)
{
	return (support_enqcmd && wq_pasid_enabled(wq));
}

enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

enum idxd_interrupt_type {
	IDXD_IRQ_MSIX = 0,
	IDXD_IRQ_IMS,
};

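/*
 * Each WQ owns four portal pages in the portal BAR (hence the wq_id * 4
 * page stride below); the unlimited portal occupies the first page and the
 * limited portal the second, giving the prot * 0x1000 offset.
 */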
static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}

#define IDXD_PORTAL_MASK (PAGE_SIZE - 1)

/*
 * Even though this function can be accessed by multiple threads, it is safe to use.
 * At worst the address gets used more than once before it gets incremented. We don't
 * hit a threshold until iops reach many millions per second. So the occasional
 * reuse of the same address is tolerable compared to using an atomic variable. This is
 * safe on a system that has atomic load/store for 32-bit integers. Given that this is an
 * Intel iEP device, that should not be a problem.
 */
static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq)
{
	int ofs = wq->portal_offset;

	wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) & IDXD_PORTAL_MASK;
	return wq->portal + ofs;
}

static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
};

static inline void idxd_wq_set_private(struct idxd_wq *wq, void *private)
{
	dev_set_drvdata(wq_confdev(wq), private);
}

static inline void *idxd_wq_get_private(struct idxd_wq *wq)
{
	return dev_get_drvdata(wq_confdev(wq));
}

/*
 * Intel IAA does not support batch processing.
 * The max batch size of the device, the max batch size of the wq and
 * the max batch shift of wqcfg should always be 0 on IAA.
 */
static inline void idxd_set_max_batch_size(int idxd_type, struct idxd_device *idxd,
					   u32 max_batch_size)
{
	if (idxd_type == IDXD_TYPE_IAX)
		idxd->max_batch_size = 0;
	else
		idxd->max_batch_size = max_batch_size;
}

static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq,
					      u32 max_batch_size)
{
	if (idxd_type == IDXD_TYPE_IAX)
		wq->max_batch_size = 0;
	else
		wq->max_batch_size = max_batch_size;
}

static inline bool idxd_sgl_supported(struct idxd_device *idxd)
{
	return idxd->data->type == IDXD_TYPE_DSA &&
	       idxd->hw.version >= DEVICE_VERSION_3 &&
	       idxd->hw.dsacap0.sgl_formats;
}

static inline void idxd_wq_set_init_max_sgl_size(struct idxd_device *idxd,
						 struct idxd_wq *wq)
{
	if (idxd_sgl_supported(idxd))
		wq->max_sgl_size = 1U << idxd->hw.dsacap0.max_sgl_shift;
}

static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg,
						  u32 max_batch_shift)
{
	if (idxd_type == IDXD_TYPE_IAX)
		wqcfg->max_batch_shift = 0;
	else
		wqcfg->max_batch_shift = max_batch_shift;
}

static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev)
{
	return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0);
}

#define MODULE_ALIAS_IDXD_DEVICE(type) MODULE_ALIAS("idxd:t" __stringify(type) "*")
#define IDXD_DEVICES_MODALIAS_FMT "idxd:t%d"

int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
					struct module *module, const char *mod_name);
#define idxd_driver_register(driver) \
	__idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)

void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);

#define module_idxd_driver(__idxd_driver) \
	module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)
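
/*
 * Illustrative sketch (not part of this driver): a hypothetical sub-driver
 * binding to work queues would fill in struct idxd_device_driver and
 * register it with the helpers above, e.g.:
 *
 *	static enum idxd_dev_type my_dev_types[] = {
 *		IDXD_DEV_WQ,
 *		IDXD_DEV_NONE,
 *	};
 *
 *	static struct idxd_device_driver my_wq_driver = {
 *		.name = "my_wq_driver",
 *		.type = my_dev_types,
 *		.probe = my_wq_probe,
 *		.remove = my_wq_remove,
 *	};
 *	module_idxd_driver(my_wq_driver);
 *
 * The type array is terminated by IDXD_DEV_NONE; my_wq_probe() and
 * my_wq_remove() are hypothetical callbacks taking a struct idxd_dev *.
 */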

void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type,
			   bool free_desc, void *ctx, u32 *status);

static inline void idxd_desc_complete(struct idxd_desc *desc,
				      enum idxd_complete_type comp_type,
				      bool free_desc)
{
	struct idxd_device_driver *drv;
	u32 status;

	drv = wq_to_idxd_drv(desc->wq);
	if (drv->desc_complete)
		drv->desc_complete(desc, comp_type, free_desc,
				   &desc->txd, &status);
}

int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
int idxd_load_iaa_device_defaults(struct idxd_device *idxd);

/* device interrupt control */
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);

/* device control */
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
			 const struct pci_device_id *id);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int idxd_drv_enable_wq(struct idxd_wq *wq);
void idxd_drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_clear_state(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type);
int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type);

/* work queue control */
void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq, bool reset_config);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void __idxd_wq_quiesce(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
void idxd_wq_free_irq(struct idxd_wq *wq);
int idxd_wq_request_irq(struct idxd_wq *wq);
void idxd_wq_flush_descs(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);

/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);
int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
		 void *buf, int len);
void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index);

/* perfmon */
#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
int perfmon_pmu_init(struct idxd_device *idxd);
void perfmon_pmu_remove(struct idxd_device *idxd);
void perfmon_counter_overflow(struct idxd_device *idxd);
#else
static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
#endif

/* debugfs */
int idxd_device_init_debugfs(struct idxd_device *idxd);
void idxd_device_remove_debugfs(struct idxd_device *idxd);
int idxd_init_debugfs(void);
void idxd_remove_debugfs(void);

#endif