// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the DMA transfers.
 * Combined, this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals connected
 * to a DMA controller that has a DMAengine driver implementation.
 */

struct dmaengine_buffer {
        struct iio_dma_buffer_queue queue;

        struct dma_chan *chan;
        struct list_head active;

        size_t align;
        size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
        struct iio_buffer *buffer)
{
        return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

static void iio_dmaengine_buffer_block_done(void *data,
        const struct dmaengine_result *result)
{
        struct iio_dma_buffer_block *block = data;
        unsigned long flags;

        spin_lock_irqsave(&block->queue->list_lock, flags);
        list_del(&block->head);
        spin_unlock_irqrestore(&block->queue->list_lock, flags);

        block->bytes_used -= result->residue;
        iio_dma_buffer_block_done(block);
}

static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
        struct iio_dma_buffer_block *block)
{
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(&queue->buffer);
        struct dma_async_tx_descriptor *desc;
        enum dma_transfer_direction dma_dir;
        struct scatterlist *sgl;
        struct dma_vec *vecs;
        size_t max_size;
        dma_cookie_t cookie;
        size_t len_total;
        unsigned int i;
        int nents;

        if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
                dma_dir = DMA_DEV_TO_MEM;
        else
                dma_dir = DMA_MEM_TO_DEV;

        if (block->sg_table) {
                sgl = block->sg_table->sgl;
                nents = sg_nents_for_len(sgl, block->bytes_used);
                if (nents < 0)
                        return nents;

                vecs = kmalloc_array(nents, sizeof(*vecs), GFP_ATOMIC);
                if (!vecs)
                        return -ENOMEM;

                len_total = block->bytes_used;

                for (i = 0; i < nents; i++) {
                        vecs[i].addr = sg_dma_address(sgl);
                        vecs[i].len = min(sg_dma_len(sgl), len_total);
                        len_total -= vecs[i].len;

                        sgl = sg_next(sgl);
                }

                desc = dmaengine_prep_peripheral_dma_vec(dmaengine_buffer->chan,
                                                         vecs, nents, dma_dir,
                                                         DMA_PREP_INTERRUPT);
                kfree(vecs);
        } else {
                max_size = min(block->size, dmaengine_buffer->max_size);
                max_size = round_down(max_size, dmaengine_buffer->align);

                if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
                        block->bytes_used = max_size;

                if (!block->bytes_used || block->bytes_used > max_size)
                        return -EINVAL;

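                /*
                 * Blocks without an sg_table are the driver-allocated,
                 * physically contiguous blocks of the classic IIO buffer
                 * interface, so a single slave transfer covering bytes_used
                 * is sufficient.
                 */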
                desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
                                                   block->phys_addr,
                                                   block->bytes_used,
                                                   dma_dir,
                                                   DMA_PREP_INTERRUPT);
        }
        if (!desc)
                return -ENOMEM;

        desc->callback_result = iio_dmaengine_buffer_block_done;
        desc->callback_param = block;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return dma_submit_error(cookie);

        spin_lock_irq(&dmaengine_buffer->queue.list_lock);
        list_add_tail(&block->head, &dmaengine_buffer->active);
        spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

        dma_async_issue_pending(dmaengine_buffer->chan);

        return 0;
}

static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(&queue->buffer);

        dmaengine_terminate_sync(dmaengine_buffer->chan);
        iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(buf);

        iio_dma_buffer_release(&dmaengine_buffer->queue);
        kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
        .read = iio_dma_buffer_read,
        .write = iio_dma_buffer_write,
        .set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
        .set_length = iio_dma_buffer_set_length,
        .request_update = iio_dma_buffer_request_update,
        .enable = iio_dma_buffer_enable,
        .disable = iio_dma_buffer_disable,
        .data_available = iio_dma_buffer_usage,
        .space_available = iio_dma_buffer_usage,
        .release = iio_dmaengine_buffer_release,

        .enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
        .attach_dmabuf = iio_dma_buffer_attach_dmabuf,
        .detach_dmabuf = iio_dma_buffer_detach_dmabuf,

        .lock_queue = iio_dma_buffer_lock_queue,
        .unlock_queue = iio_dma_buffer_unlock_queue,

        .get_dma_dev = iio_dma_buffer_get_dma_dev,

        .modes = INDIO_BUFFER_HARDWARE,
        .flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
        .submit = iio_dmaengine_buffer_submit_block,
        .abort = iio_dmaengine_buffer_abort,
};

static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(buffer);

        return sysfs_emit(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
                       iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
        &iio_dev_attr_length_align_bytes,
        NULL,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @chan: DMA channel.
 *
 * This allocates a new IIO buffer which internally uses the DMAengine
 * framework to perform its transfers.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
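 *
 * The buffer's length alignment is derived from the DMA channel's address
 * width capabilities (the larger of the minimum supported source and
 * destination widths) and is exposed to userspace through the
 * length_align_bytes buffer attribute.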
 */
static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan)
{
        struct dmaengine_buffer *dmaengine_buffer;
        unsigned int width, src_width, dest_width;
        struct dma_slave_caps caps;
        int ret;

        ret = dma_get_slave_caps(chan, &caps);
        if (ret < 0)
                return ERR_PTR(ret);

        dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
        if (!dmaengine_buffer)
                return ERR_PTR(-ENOMEM);

        /* Needs to be aligned to the maximum of the minimums */
        if (caps.src_addr_widths)
                src_width = __ffs(caps.src_addr_widths);
        else
                src_width = 1;
        if (caps.dst_addr_widths)
                dest_width = __ffs(caps.dst_addr_widths);
        else
                dest_width = 1;
        width = max(src_width, dest_width);

        INIT_LIST_HEAD(&dmaengine_buffer->active);
        dmaengine_buffer->chan = chan;
        dmaengine_buffer->align = width;
        dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

        iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
                            &iio_dmaengine_default_ops);

        dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs;
        dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

        return &dmaengine_buffer->queue.buffer;
}

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(buffer);

        iio_dma_buffer_exit(&dmaengine_buffer->queue);
        iio_buffer_put(buffer);
}

/**
 * iio_dmaengine_buffer_teardown() - Releases DMA channel and frees buffer
 * @buffer: Buffer to free
 *
 * Releases the DMA channel and frees the buffer previously set up with
 * iio_dmaengine_buffer_setup_ext().
 */
void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer)
{
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(buffer);
        struct dma_chan *chan = dmaengine_buffer->chan;

        iio_dmaengine_buffer_free(buffer);
        dma_release_channel(chan);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_teardown, "IIO_DMAENGINE_BUFFER");

static struct iio_buffer
*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
                                  struct dma_chan *chan,
                                  enum iio_buffer_direction dir)
{
        struct iio_buffer *buffer;
        int ret;

        buffer = iio_dmaengine_buffer_alloc(chan);
        if (IS_ERR(buffer))
                return ERR_CAST(buffer);

        indio_dev->modes |= INDIO_BUFFER_HARDWARE;

        buffer->direction = dir;

        ret = iio_device_attach_buffer(indio_dev, buffer);
        if (ret) {
                iio_dmaengine_buffer_free(buffer);
                return ERR_PTR(ret);
        }

        return buffer;
}

/**
 * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
 * @dev: DMA channel consumer device
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
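 *
 * The DMA channel is requested from @dev with dma_request_chan() using the
 * given @channel name and is released again if attaching the buffer fails.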
 *
 * Once done using the buffer, iio_dmaengine_buffer_teardown() should be used
 * to release it.
 */
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
                                                  struct iio_dev *indio_dev,
                                                  const char *channel,
                                                  enum iio_buffer_direction dir)
{
        struct dma_chan *chan;
        struct iio_buffer *buffer;

        chan = dma_request_chan(dev, channel);
        if (IS_ERR(chan))
                return ERR_CAST(chan);

        buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
        if (IS_ERR(buffer))
                dma_release_channel(chan);

        return buffer;
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");

static void devm_iio_dmaengine_buffer_teardown(void *buffer)
{
        iio_dmaengine_buffer_teardown(buffer);
}

/**
 * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
 * @dev: Device for devm ownership and DMA channel consumer device
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 *
 * The buffer and the DMA channel are released automatically when @dev is
 * unbound, so no explicit teardown call is required.
 */
int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
                                        struct iio_dev *indio_dev,
                                        const char *channel,
                                        enum iio_buffer_direction dir)
{
        struct iio_buffer *buffer;

        buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_teardown,
                                        buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");

static void devm_iio_dmaengine_buffer_free(void *buffer)
{
        iio_dmaengine_buffer_free(buffer);
}

/**
 * devm_iio_dmaengine_buffer_setup_with_handle() - Setup a DMA buffer for an
 * IIO device
 * @dev: Device for devm ownership
 * @indio_dev: IIO device to which to attach this buffer.
 * @chan: DMA channel
 * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 *
 * This is the same as devm_iio_dmaengine_buffer_setup_ext() except that the
 * caller manages requesting and releasing the DMA channel handle; only the
 * buffer itself is freed automatically when @dev is unbound.
 */
int devm_iio_dmaengine_buffer_setup_with_handle(struct device *dev,
                                                struct iio_dev *indio_dev,
                                                struct dma_chan *chan,
                                                enum iio_buffer_direction dir)
{
        struct iio_buffer *buffer;

        buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_free,
                                        buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_with_handle,
                     "IIO_DMAENGINE_BUFFER");

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("IIO_DMA_BUFFER");
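
/*
 * Illustrative sketch (not part of the driver): a typical consumer requests
 * the buffer from its probe() roughly as follows, assuming a device tree
 * "rx" DMA channel and an IIO device allocated with devm_iio_device_alloc().
 * The names foo_probe() and pdev are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		int ret;
 *
 *		indio_dev = devm_iio_device_alloc(&pdev->dev, 0);
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		ret = devm_iio_dmaengine_buffer_setup_ext(&pdev->dev, indio_dev,
 *							  "rx",
 *							  IIO_BUFFER_DIRECTION_IN);
 *		if (ret)
 *			return ret;
 *
 *		return devm_iio_device_register(&pdev->dev, indio_dev);
 *	}
 */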