/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _LINUX_SCATTERLIST_H_
#define _LINUX_SCATTERLIST_H_

#include <linux/page.h>
#include <linux/slab.h>
#include <linux/mm.h>

struct bus_dmamap;
struct scatterlist {
	unsigned long page_link;
#define	SG_PAGE_LINK_CHAIN	0x1UL
#define	SG_PAGE_LINK_LAST	0x2UL
#define	SG_PAGE_LINK_MASK	0x3UL
	unsigned int offset;
	unsigned int length;
	dma_addr_t dma_address;
	struct bus_dmamap *dma_map;	/* FreeBSD specific */
};

CTASSERT((sizeof(struct scatterlist) & SG_PAGE_LINK_MASK) == 0);

struct sg_table {
	struct scatterlist *sgl;
	unsigned int nents;
	unsigned int orig_nents;
};

struct sg_page_iter {
	struct scatterlist *sg;
	unsigned int sg_pgoffset;
	unsigned int maxents;
	struct {
		unsigned int nents;
		int pg_advance;
	} internal;
};

#define	SCATTERLIST_MAX_SEGMENT	(-1U & ~(PAGE_SIZE - 1))

#define	SG_MAX_SINGLE_ALLOC	(PAGE_SIZE / sizeof(struct scatterlist))

#define	SG_MAGIC		0x87654321UL
#define	SG_CHAIN		SG_PAGE_LINK_CHAIN
#define	SG_END			SG_PAGE_LINK_LAST

#define	sg_is_chain(sg)		((sg)->page_link & SG_PAGE_LINK_CHAIN)
#define	sg_is_last(sg)		((sg)->page_link & SG_PAGE_LINK_LAST)
#define	sg_chain_ptr(sg)	\
	((struct scatterlist *) ((sg)->page_link & ~SG_PAGE_LINK_MASK))

#define	sg_dma_address(sg)	(sg)->dma_address
#define	sg_dma_len(sg)		(sg)->length

#define	for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

#define	for_each_sg(sglist, sg, sgmax, iter)				\
	for (iter = 0, sg = (sglist); iter < (sgmax); iter++, sg = sg_next(sg))

typedef struct scatterlist *(sg_alloc_fn) (unsigned int, gfp_t);
typedef void (sg_free_fn) (struct scatterlist *, unsigned int);

static inline void
sg_assign_page(struct scatterlist *sg, struct page *page)
{
	unsigned long page_link = sg->page_link & SG_PAGE_LINK_MASK;

	sg->page_link = page_link | (unsigned long)page;
}

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	sg_assign_page(sg, page);
	sg->offset = offset;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return ((struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK));
}

static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & (PAGE_SIZE - 1));
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return (NULL);
	sg++;
	if (sg_is_chain(sg))
		sg = sg_chain_ptr(sg);
	return (sg);
}

static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return (VM_PAGE_TO_PHYS(sg_page(sg)) + sg->offset);
}

static inline void *
sg_virt(struct scatterlist *sg)
{

	return ((void *)((unsigned long)page_address(sg_page(sg)) + sg->offset));
}

static inline void
sg_chain(struct scatterlist *prv, unsigned int prv_nents,
    struct scatterlist *sgl)
{
	struct scatterlist *sg = &prv[prv_nents - 1];

	sg->offset = 0;
	sg->length = 0;
	sg->page_link = ((unsigned long)sgl |
	    SG_PAGE_LINK_CHAIN) & ~SG_PAGE_LINK_LAST;
}

static inline void
sg_mark_end(struct scatterlist *sg)
{
	sg->page_link |= SG_PAGE_LINK_LAST;
	sg->page_link &= ~SG_PAGE_LINK_CHAIN;
}

static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
	bzero(sg, sizeof(*sg) * nents);
	sg_mark_end(&sg[nents - 1]);
}

static struct scatterlist *
sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		return ((void *)__get_free_page(gfp_mask));
	} else
		return (kmalloc(nents * sizeof(struct scatterlist), gfp_mask));
}

static inline void
sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		free_page((unsigned long)sg);
	} else
		kfree(sg);
}

static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents,
    bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}

static inline void
sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
}

static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
    unsigned int max_ents, struct scatterlist *first_chunk,
    gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return (-EINVAL);
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size;
		unsigned int alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			if (prv)
				table->nents = ++table->orig_nents;

			return (-ENOMEM);
		}
		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return (0);
}

static inline int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
	    NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

	return (ret);
}

static inline int
__sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    unsigned int max_segment, gfp_t gfp_mask)
{
	unsigned int i, segs, cur, len;
	int rc;
	struct scatterlist *s;

	if (__predict_false(!max_segment || offset_in_page(max_segment)))
		return (-EINVAL);

	len = 0;
	for (segs = i = 1; i < count; ++i) {
		len += PAGE_SIZE;
		if (len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			++segs;
			len = 0;
		}
	}
	if (__predict_false((rc = sg_alloc_table(sgt, segs, gfp_mask))))
		return (rc);

	cur = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long seg_size;
		unsigned int j;

		len = 0;
		for (j = cur + 1; j < count; ++j) {
			len += PAGE_SIZE;
			if (len >= max_segment || page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		seg_size = ((j - cur) << PAGE_SHIFT) - off;
		sg_set_page(s, pages[cur], min(size, seg_size), off);
		size -= seg_size;
		off = 0;
		cur = j;
	}
	return (0);
}

static inline int
sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    gfp_t gfp_mask)
{

	return (__sg_alloc_table_from_pages(sgt, pages, count, off, size,
	    SCATTERLIST_MAX_SEGMENT, gfp_mask));
}

static inline int
sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return (nents);
}

static inline void
__sg_page_iter_start(struct sg_page_iter *piter,
    struct scatterlist *sglist, unsigned int nents,
    unsigned long pgoffset)
{
	piter->internal.pg_advance = 0;
	piter->internal.nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}

static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}

static inline int
sg_page_count(struct scatterlist *sg)
{
	return (PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT);
}

static inline bool
__sg_page_iter_next(struct sg_page_iter *piter)
{
	if (piter->internal.nents == 0)
		return (0);
	if (piter->sg == NULL)
		return (0);

	piter->sg_pgoffset += piter->internal.pg_advance;
	piter->internal.pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (--piter->internal.nents == 0)
			return (0);
		if (piter->sg == NULL)
			return (0);
	}
	return (1);
}

static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
    unsigned int nents, unsigned long pgoffset)
{
	if (nents) {
		iter->sg = sgl;
		iter->sg_pgoffset = pgoffset - 1;
		iter->maxents = nents;
		_sg_iter_next(iter);
	} else {
		iter->sg = NULL;
		iter->sg_pgoffset = 0;
		iter->maxents = 0;
	}
}

static inline dma_addr_t
sg_page_iter_dma_address(struct sg_page_iter *spi)
{
	return (spi->sg->dma_address + (spi->sg_pgoffset << PAGE_SHIFT));
}

static inline struct page *
sg_page_iter_page(struct sg_page_iter *piter)
{
	return (nth_page(sg_page(piter->sg), piter->sg_pgoffset));
}


#endif /* _LINUX_SCATTERLIST_H_ */
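
/*
 * Usage sketch (not part of the original header): build a single-entry
 * scatterlist over a buffer that fits within one page, then walk it
 * page by page with the for_each_sg_page() iterator defined above.
 * "buf", "buflen" and "consume_page()" are hypothetical placeholders;
 * only the sg_* helpers declared in this file are real.
 *
 *	struct scatterlist sg[1];
 *	struct sg_page_iter iter;
 *
 *	sg_init_table(sg, 1);
 *	sg_set_buf(&sg[0], buf, buflen);
 *	for_each_sg_page(sg, &iter, 1, 0)
 *		consume_page(sg_page_iter_page(&iter));
 */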