xref: /linux/drivers/crypto/caam/sg_sw_sec4.h (revision c4ee0af3fa0dc65f690fc908f02b8355f9576ea0)
/*
 * CAAM/SEC 4.x functions for using scatterlists in caam driver
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 */

struct sec4_sg_entry;

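/*
 * Note: the full definition of struct sec4_sg_entry lives elsewhere in the
 * driver (desc.h in this tree). The helpers below only assume it provides
 * the ptr, len, reserved, buf_pool_id and offset fields, plus the
 * SEC4_SG_LEN_FIN "final entry" flag for the len field.
 */
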
/*
 * convert single dma address to h/w link table format
 */
static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
				      dma_addr_t dma, u32 len, u32 offset)
{
	sec4_sg_ptr->ptr = dma;
	sec4_sg_ptr->len = len;
	sec4_sg_ptr->reserved = 0;
	sec4_sg_ptr->buf_pool_id = 0;
	sec4_sg_ptr->offset = offset;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
		       sizeof(struct sec4_sg_entry), 1);
#endif
}
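
/*
 * Example (illustrative sketch, not part of the original header): filling a
 * single link table entry for a buffer the caller has already DMA-mapped.
 * The "jrdev", "buf", "buflen" and "sec4_sg" names are hypothetical.
 *
 *	dma_addr_t buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
 *
 *	if (!dma_mapping_error(jrdev, buf_dma))
 *		dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
 */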

/*
 * convert scatterlist to h/w link table format
 * but does not set the final bit; instead, returns a pointer to the last entry
 */
static inline struct sec4_sg_entry *
sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
	      struct sec4_sg_entry *sec4_sg_ptr, u32 offset)
{
	while (sg_count) {
		dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
				   sg_dma_len(sg), offset);
		sec4_sg_ptr++;
		sg = scatterwalk_sg_next(sg);
		sg_count--;
	}
	return sec4_sg_ptr - 1;
}

/*
 * convert scatterlist to h/w link table format
 * scatterlist must have been previously dma mapped;
 * the last entry is marked with the SEC4_SG_LEN_FIN (final) bit
 */
static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
				      struct sec4_sg_entry *sec4_sg_ptr,
				      u32 offset)
{
	sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
	sec4_sg_ptr->len |= SEC4_SG_LEN_FIN;
}
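
/*
 * Example (illustrative sketch): building a complete, terminated link table
 * for a scatterlist already mapped with dma_map_sg(), then mapping the table
 * itself for the hardware. The "edesc" layout and "mapped_nents" name are
 * hypothetical.
 *
 *	struct sec4_sg_entry *sec4_sg = edesc->sec4_sg;
 *
 *	sg_to_sec4_sg_last(req->src, mapped_nents, sec4_sg, 0);
 *	edesc->sec4_sg_dma = dma_map_single(jrdev, sec4_sg,
 *					    mapped_nents * sizeof(*sec4_sg),
 *					    DMA_TO_DEVICE);
 */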

/*
 * count the number of scatterlist elements needed to cover nbytes;
 * sets *chained when a chained scatterlist is detected
 */
static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
			     bool *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
		sg = scatterwalk_sg_next(sg);
	}

	return sg_nents;
}

/* like __sg_count(), but return 0 for a single entry (no link table needed) */
static inline int sg_count(struct scatterlist *sg_list, int nbytes,
			   bool *chained)
{
	int sg_nents = __sg_count(sg_list, nbytes, chained);

	if (likely(sg_nents == 1))
		return 0;

	return sg_nents;
}
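
/*
 * Example (illustrative sketch): typical use of sg_count() to decide whether
 * a link table is needed at all. The request fields and variable names are
 * hypothetical.
 *
 *	bool chained = false;
 *	int src_nents = sg_count(req->src, req->nbytes, &chained);
 *
 * A non-zero return means a sec4_sg link table with src_nents entries is
 * required; a return of 0 means the single segment can be referenced
 * directly in the descriptor.
 */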

/*
 * map a scatterlist for DMA; if the list is chained, each entry is mapped
 * individually by walking the chain
 */
static inline int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
				     unsigned int nents,
				     enum dma_data_direction dir, bool chained)
{
	if (unlikely(chained)) {
		int i;

		for (i = 0; i < nents; i++) {
			dma_map_sg(dev, sg, 1, dir);
			sg = scatterwalk_sg_next(sg);
		}
	} else {
		dma_map_sg(dev, sg, nents, dir);
	}
	return nents;
}

/*
 * unmap a scatterlist previously mapped with dma_map_sg_chained()
 */
static inline int dma_unmap_sg_chained(struct device *dev,
				       struct scatterlist *sg,
				       unsigned int nents,
				       enum dma_data_direction dir,
				       bool chained)
{
	if (unlikely(chained)) {
		int i;

		for (i = 0; i < nents; i++) {
			dma_unmap_sg(dev, sg, 1, dir);
			sg = scatterwalk_sg_next(sg);
		}
	} else {
		dma_unmap_sg(dev, sg, nents, dir);
	}
	return nents;
}
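
/*
 * Example (illustrative sketch): pairing the two helpers around a hardware
 * operation. "jrdev", "src_nents" and "chained" are hypothetical and would
 * come from the surrounding request-handling code; note that sg_count()
 * returns 0 for a single segment, hence the "?: 1".
 *
 *	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
 *			   chained);
 *	(submit the job descriptor and wait for completion)
 *	dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
 *			     chained);
 */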

/* Map the SG page into kernel virtual address space and copy len bytes */
static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
			       int len, int offset)
{
	u8 *mapped_addr;

	/*
	 * the page may be a user-space page pinned with get_user_pages(),
	 * so it must be kmapped before use and kunmapped afterwards
	 */
	mapped_addr = kmap_atomic(sg_page(sg));
	memcpy(dest, mapped_addr + offset, len);
	kunmap_atomic(mapped_addr);
}
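
/*
 * Note: kmap_atomic() creates a temporary, non-sleeping mapping, so nothing
 * that might sleep may run between the map and unmap above. The helper also
 * assumes offset + len stays within the single mapped page.
 */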

/* Copy len bytes from sg to dest, starting from the beginning of sg */
static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
{
	struct scatterlist *current_sg = sg;
	int cpy_index = 0, next_cpy_index = current_sg->length;

	while (next_cpy_index < len) {
		sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
			    current_sg->offset);
		current_sg = scatterwalk_sg_next(current_sg);
		cpy_index = next_cpy_index;
		next_cpy_index += current_sg->length;
	}
	if (cpy_index < len)
		sg_map_copy(dest + cpy_index, current_sg, len - cpy_index,
			    current_sg->offset);
}
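
/*
 * Example (illustrative sketch): gathering a scattered request into a flat
 * bounce buffer. "buf" and "req" are hypothetical.
 *
 *	u8 *buf = kmalloc(req->nbytes, GFP_ATOMIC);
 *
 *	if (buf)
 *		sg_copy(buf, req->src, req->nbytes);
 */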

/* Copy sg data from byte offset to_skip up to byte offset end into dest */
static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
				int to_skip, unsigned int end)
{
	struct scatterlist *current_sg = sg;
	int sg_index, cpy_index, offset;

	/* advance to the entry containing byte to_skip */
	sg_index = current_sg->length;
	while (sg_index <= to_skip) {
		current_sg = scatterwalk_sg_next(current_sg);
		sg_index += current_sg->length;
	}
	/* copy the tail of that entry, then the rest via sg_copy() */
	cpy_index = sg_index - to_skip;
	offset = current_sg->offset + current_sg->length - cpy_index;
	sg_map_copy(dest, current_sg, cpy_index, offset);
	if (end - sg_index) {
		current_sg = scatterwalk_sg_next(current_sg);
		sg_copy(dest + cpy_index, current_sg, end - sg_index);
	}
}
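
/*
 * Example (illustrative sketch): copying only the trailing portion of a
 * request, e.g. the last "authsize" bytes, into a flat buffer. The names
 * below are hypothetical.
 *
 *	sg_copy_part(icv_buf, req->src, req->cryptlen - authsize,
 *		     req->cryptlen);
 *
 * This skips the first (req->cryptlen - authsize) bytes of the scatterlist
 * and copies the remaining authsize bytes into icv_buf.
 */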