/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef I915_SCATTERLIST_H
#define I915_SCATTERLIST_H

#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

#include "i915_gem.h"

/*
 * Optimised SGL iterator for GEM objects
 */
static __always_inline struct sgt_iter {
	struct scatterlist *sgp;
	union {
		unsigned long pfn;
		dma_addr_t dma;
	};
	unsigned int curr;
	unsigned int max;
} __sgt_iter(struct scatterlist *sgl, bool dma) {
	struct sgt_iter s = { .sgp = sgl };

	if (s.sgp) {
		s.max = s.curr = s.sgp->offset;
		s.max += s.sgp->length;
		if (dma)
			s.dma = sg_dma_address(s.sgp);
		else
			s.pfn = page_to_pfn(sg_page(s.sgp));
	}

	return s;
}
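
/*
 * Illustrative sketch: __sgt_iter() seeds the iterator with the first
 * segment; 'curr' and 'max' are byte offsets within that segment, while
 * 'pfn'/'dma' hold its base.  For a hypothetical populated sg_table 'st':
 *
 *	struct sgt_iter it = __sgt_iter(st.sgl, false);
 *
 * leaves it.pfn == page_to_pfn(sg_page(st.sgl)), it.curr == st.sgl->offset
 * and it.max == it.curr + st.sgl->length.
 */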

static inline int __sg_page_count(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

static inline struct scatterlist *____sg_next(struct scatterlist *sg)
{
	++sg;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);
	return sg;
}

/**
 * __sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   If the entry is the last, return NULL; otherwise, step to the next
 *   element in the array (@sg + 1). If that element is a chain pointer,
 *   follow it to the start of the chained array; otherwise return the
 *   pointer to that next element.
 **/
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
	return sg_is_last(sg) ? NULL : ____sg_next(sg);
}
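
/*
 * Illustrative sketch: walking every entry of a (possibly chained)
 * scatterlist with __sg_next(), assuming a hypothetical populated
 * sg_table 'st':
 *
 *	struct scatterlist *sg;
 *	unsigned int total = 0;
 *
 *	for (sg = st.sgl; sg; sg = __sg_next(sg))
 *		total += sg->length;
 */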

/**
 * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table
 * @__dp:	Device address (output)
 * @__iter:	'struct sgt_iter' (iterator state, internal)
 * @__sgt:	sg_table to iterate over (input)
 * @__step:	step size in bytes
 */
#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step)		\
	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
	     ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp;	\
	     (((__iter).curr += (__step)) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
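
/*
 * Illustrative sketch: visiting the DMA address of every 4KiB block of a
 * hypothetical mapped sg_table 'st' with __for_each_sgt_daddr():
 *
 *	struct sgt_iter iter;
 *	dma_addr_t addr;
 *
 *	__for_each_sgt_daddr(addr, iter, &st, SZ_4K)
 *		pr_debug("block at %pad\n", &addr);
 */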

/**
 * for_each_sgt_page - iterate over the pages of the given sg_table
 * @__pp:	page pointer (output)
 * @__iter:	'struct sgt_iter' (iterator state, internal)
 * @__sgt:	sg_table to iterate over (input)
 */
#define for_each_sgt_page(__pp, __iter, __sgt)				\
	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
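
/*
 * Illustrative sketch: touching every backing page of a hypothetical
 * populated sg_table 'st' with for_each_sgt_page():
 *
 *	struct sgt_iter iter;
 *	struct page *page;
 *
 *	for_each_sgt_page(page, iter, &st)
 *		set_page_dirty(page);
 */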

static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
{
	unsigned int page_sizes;

	page_sizes = 0;
	while (sg) {
		GEM_BUG_ON(sg->offset);
		GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
		page_sizes |= sg->length;
		sg = __sg_next(sg);
	}

	return page_sizes;
}
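
/*
 * Illustrative example: i915_sg_page_sizes() ORs together the segment
 * lengths, so a table made of one 64KiB segment and one 4KiB segment
 * yields 0x10000 | 0x1000 == 0x11000.
 */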

static inline unsigned int i915_sg_segment_size(void)
{
	unsigned int size = swiotlb_max_segment();

	if (size == 0)
		return SCATTERLIST_MAX_SEGMENT;

	size = rounddown(size, PAGE_SIZE);
	/* swiotlb_max_segment() can return 1 byte when it means one page. */
	if (size < PAGE_SIZE)
		size = PAGE_SIZE;

	return size;
}
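
/*
 * Illustrative note: when swiotlb_max_segment() reports 0, the helper above
 * returns SCATTERLIST_MAX_SEGMENT; when it reports a value smaller than a
 * page (e.g. 1 byte meaning one page), the clamp makes the result PAGE_SIZE.
 */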

bool i915_sg_trim(struct sg_table *orig_st);

#endif