/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is only set up by the driver.
 *
 * Each client has a channels list; it is only modified under client->lock
 * and in an RCU callback, so it is safe to read under rcu_read_lock().
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_put is done for each class_device registered. When the
 * class_device is released, the corresponding kref_put is done in the release
 * method. Every time one of the device's channels is allocated to a client,
 * a kref_get occurs. When the channel is freed, the corresponding kref_put
 * happens. The device's release function does a completion, so
 * unregister_device does a remove event, class_device_unregister, a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t. A single reference is set on an ADDED
 * event and removed on a REMOVE event. The net DMA client takes an extra
 * reference per outstanding transaction. The release function does a
 * kref_put on the device. -ChrisL
 */
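/*
 * Minimal client-side sketch (illustrative only, not compiled here): a
 * client registers an event callback, asks for channels, and is told via
 * DMA_RESOURCE_ADDED / DMA_RESOURCE_REMOVED events which channels it may
 * use. The callback's return type and the enum name are assumed to match
 * <linux/dmaengine.h>; only functions exported by this file are called.
 *
 *	static void my_dma_event(struct dma_client *client,
 *				 struct dma_chan *chan, enum dma_event event)
 *	{
 *		if (event == DMA_RESOURCE_ADDED)
 *			;	// remember chan for later async memcpy use
 *		else if (event == DMA_RESOURCE_REMOVED)
 *			;	// stop using chan; outstanding refs drain via RCU
 *	}
 *
 *	struct dma_client *client = dma_async_client_register(my_dma_event);
 *	if (client)
 *		dma_async_client_chan_request(client, 2);  // ask for two channels
 *	...
 *	dma_async_client_unregister(client);  // frees channels and the client
 */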
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);

	return sprintf(buf, "%d\n", (chan->client ? 1 : 0));
}

static struct class_device_attribute dma_class_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_class_dev_release(struct class_device *cd)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);

	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name			= "dma",
	.class_dev_attrs	= dma_class_attrs,
	.release		= dma_class_dev_release,
};
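/*
 * With the class and attributes above, each registered channel shows up in
 * sysfs roughly as follows (the per-cpu counters are summed on each read):
 *
 *	/sys/class/dma/dma<dev_id>chan<chan_id>/memcpy_count
 *	/sys/class/dma/dma<dev_id>chan<chan_id>/bytes_transferred
 *	/sys/class/dma/dma<dev_id>chan<chan_id>/in_use
 *
 * The "dma%dchan%d" name is assigned in dma_async_device_register() below;
 * the exact mount point of the class tree depends on the sysfs layout of
 * the running kernel.
 */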
/* --- client and device registration --- */

/**
 * dma_client_chan_alloc - try to allocate a channel to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	unsigned long flags;
	int desc;	/* allocated descriptor count */

	/* Find a channel, any DMA engine will do */
	list_for_each_entry(device, &dma_device_list, global_node) {
		list_for_each_entry(chan, &device->channels, device_node) {
			if (chan->client)
				continue;

			desc = chan->device->device_alloc_chan_resources(chan);
			if (desc >= 0) {
				kref_get(&device->refcount);
				kref_init(&chan->refcount);
				chan->slow_ref = 0;
				INIT_RCU_HEAD(&chan->rcu);
				chan->client = client;
				spin_lock_irqsave(&client->lock, flags);
				list_add_tail_rcu(&chan->client_node,
						  &client->channels);
				spin_unlock_irqrestore(&client->lock, flags);
				return chan;
			}
		}
	}

	return NULL;
}

/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);

	chan->device->device_free_chan_resources(chan);
	chan->client = NULL;
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
	int bias = 0x7FFFFFFF;
	int i;

	for_each_possible_cpu(i)
		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);

	atomic_sub(bias, &chan->refcount.refcount);
	kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_client_chan_free(struct dma_chan *chan)
{
	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
	chan->slow_ref = 1;
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}
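/*
 * Worked example of the teardown above (numbers are illustrative): suppose
 * the channel kref is 1 and two CPUs hold 3 and 2 fast-path references in
 * their local_t counters. dma_client_chan_free() adds 0x7FFFFFFF so the
 * count cannot reach zero while readers migrate to the slow path, and sets
 * slow_ref so new references go through the kref. After the RCU grace
 * period, dma_chan_free_rcu() subtracts 0x7FFFFFFF - (3 + 2), folding the
 * per-cpu references into the kref (now 1 + 5), and its kref_put drops the
 * initial reference taken in dma_client_chan_alloc(), leaving 5. When the
 * remaining holders drop their references via the kref slow path, the count
 * hits zero and dma_chan_cleanup() frees the channel's resources.
 */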
/**
 * dma_chans_rebalance - reallocate channels to clients
 *
 * When the number of DMA channels in the system changes,
 * channels need to be rebalanced among clients.
 */
static void dma_chans_rebalance(void)
{
	struct dma_client *client;
	struct dma_chan *chan;
	unsigned long flags;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node) {
		while (client->chans_desired > client->chan_count) {
			chan = dma_client_chan_alloc(client);
			if (!chan)
				break;
			client->chan_count++;
			client->event_callback(client,
					       chan,
					       DMA_RESOURCE_ADDED);
		}
		while (client->chans_desired < client->chan_count) {
			spin_lock_irqsave(&client->lock, flags);
			chan = list_entry(client->channels.next,
					  struct dma_chan,
					  client_node);
			list_del_rcu(&chan->client_node);
			spin_unlock_irqrestore(&client->lock, flags);
			client->chan_count--;
			client->event_callback(client,
					       chan,
					       DMA_RESOURCE_REMOVED);
			dma_client_chan_free(chan);
		}
	}

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_async_client_register - allocate and register a &dma_client
 * @event_callback: callback for notification of channel addition/removal
 */
struct dma_client *dma_async_client_register(dma_event_callback event_callback)
{
	struct dma_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	INIT_LIST_HEAD(&client->channels);
	spin_lock_init(&client->lock);
	client->chans_desired = 0;
	client->chan_count = 0;
	client->event_callback = event_callback;

	mutex_lock(&dma_list_mutex);
	list_add_tail(&client->global_node, &dma_client_list);
	mutex_unlock(&dma_list_mutex);

	return client;
}

/**
 * dma_async_client_unregister - unregister a client and free the &dma_client
 * @client: &dma_client to free
 *
 * Force-frees any allocated DMA channels and frees the &dma_client memory.
 */
void dma_async_client_unregister(struct dma_client *client)
{
	struct dma_chan *chan;

	if (!client)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(chan, &client->channels, client_node)
		dma_client_chan_free(chan);
	rcu_read_unlock();

	mutex_lock(&dma_list_mutex);
	list_del(&client->global_node);
	mutex_unlock(&dma_list_mutex);

	kfree(client);
	dma_chans_rebalance();
}

/**
 * dma_async_client_chan_request - request DMA channels
 * @client: &dma_client
 * @number: count of DMA channels requested
 *
 * Clients call dma_async_client_chan_request() to specify how many
 * DMA channels they need, 0 to free all currently allocated.
 * The resulting allocations/frees are indicated to the client via the
 * event callback.
 */
void dma_async_client_chan_request(struct dma_client *client,
				   unsigned int number)
{
	client->chans_desired = number;
	dma_chans_rebalance();
}
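/*
 * Minimal provider-side sketch (illustrative only): a DMA driver describes
 * its engine with a struct dma_device, links each struct dma_chan onto
 * device->channels via chan->device_node, fills in the
 * device_alloc_chan_resources/device_free_chan_resources callbacks used by
 * this file, and then calls dma_async_device_register(). The my_* names are
 * hypothetical; any dma_device/dma_chan members not referenced in this file
 * are defined in <linux/dmaengine.h> and are not shown here.
 *
 *	struct my_dma_softc {			// hypothetical driver state
 *		struct dma_device common;
 *		struct dma_chan chan;
 *	};
 *
 *	static int my_probe(struct my_dma_softc *sc)
 *	{
 *		INIT_LIST_HEAD(&sc->common.channels);
 *		sc->common.device_alloc_chan_resources = my_alloc_chan;
 *		sc->common.device_free_chan_resources = my_free_chan;
 *		sc->chan.device = &sc->common;
 *		list_add_tail(&sc->chan.device_node, &sc->common.channels);
 *		return dma_async_device_register(&sc->common);
 *	}
 */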
/**
 * dma_async_device_register - register a DMA device
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	init_completion(&device->done);
	kref_init(&device->refcount);
	device->dev_id = id++;

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;

		chan->chan_id = chancnt++;
		chan->class_dev.class = &dma_devclass;
		chan->class_dev.dev = NULL;
		snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
			 device->dev_id, chan->chan_id);

		kref_get(&device->refcount);
		class_device_register(&chan->class_dev);
	}

	mutex_lock(&dma_list_mutex);
	list_add_tail(&device->global_node, &dma_device_list);
	mutex_unlock(&dma_list_mutex);

	dma_chans_rebalance();

	return 0;
}

/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
	struct dma_device *device;

	device = container_of(kref, struct dma_device, refcount);
	complete(&device->done);
}

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;
	unsigned long flags;

	mutex_lock(&dma_list_mutex);
	list_del(&device->global_node);
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->client) {
			spin_lock_irqsave(&chan->client->lock, flags);
			list_del(&chan->client_node);
			chan->client->chan_count--;
			spin_unlock_irqrestore(&chan->client->lock, flags);
			chan->client->event_callback(chan->client,
						     chan,
						     DMA_RESOURCE_REMOVED);
			dma_client_chan_free(chan);
		}
		class_device_unregister(&chan->class_dev);
	}
	dma_chans_rebalance();

	kref_put(&device->refcount, dma_async_device_cleanup);
	wait_for_completion(&device->done);
}

static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}

subsys_initcall(dma_bus_init);

EXPORT_SYMBOL(dma_async_client_register);
EXPORT_SYMBOL(dma_async_client_unregister);
EXPORT_SYMBOL(dma_async_client_chan_request);
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
EXPORT_SYMBOL(dma_async_memcpy_complete);
EXPORT_SYMBOL(dma_async_memcpy_issue_pending);
EXPORT_SYMBOL(dma_async_device_register);
EXPORT_SYMBOL(dma_async_device_unregister);
EXPORT_SYMBOL(dma_chan_cleanup);