/*
 * $Id: mtd_blkdevs.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);

struct mtd_blkcore_priv {
	struct task_struct *thread;
	struct request_queue *rq;
	spinlock_t queue_lock;
};

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = req->sector << 9 >> tr->blkshift;
	nsect = req->current_nr_sectors << 9 >> tr->blkshift;

	buf = req->buffer;

	if (!blk_fs_request(req))
		return 0;

	if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
		return 0;

	switch(rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;

	case WRITE:
		if (!tr->writesect)
			return 0;

		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return 0;
	}
}

static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC | PF_NOFREEZE;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;

		req = elv_next_request(rq);

		if (!req) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	return 0;
}

static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;
	wake_up_process(tr->blkcore_priv->thread);
}
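
/*
 * Illustrative sketch (not part of this driver): the contract that
 * do_blktrans_request() above relies on. 'block' is in units of
 * tr->blksize, 'buf' holds exactly one block, and a nonzero return is
 * treated as an I/O error for the whole request. A hypothetical
 * read-only translation layer could satisfy it by passing the request
 * straight through to the underlying MTD device, much as mtdblock_ro
 * does:
 *
 *	static int example_readsect(struct mtd_blktrans_dev *dev,
 *				    unsigned long block, char *buf)
 *	{
 *		size_t retlen;
 *
 *		if (dev->mtd->read(dev->mtd, block << dev->tr->blkshift,
 *				   dev->tr->blksize, &retlen, buf))
 *			return 1;
 *		return 0;
 *	}
 */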

static int blktrans_open(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = -ENODEV;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (!try_module_get(dev->mtd->owner))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot-pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}

static int blktrans_release(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = 0;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (tr->release)
		ret = tr->release(dev);

	if (!ret) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
		module_put(tr->owner);
	}

	return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

	if (dev->tr->getgeo)
		return dev->tr->getgeo(dev, geo);
	return -ENOTTY;
}

static int blktrans_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;

	switch (cmd) {
	case BLKFLSBUF:
		if (tr->flush)
			return tr->flush(dev);
		/* The core code did the work, we had nothing to do. */
		return 0;
	default:
		return -ENOTTY;
	}
}

static struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};
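
/*
 * Illustrative sketch (hypothetical, not part of this driver): a
 * translation layer that wants fdisk and friends to see a sane
 * geometry supplies tr->getgeo(), which blktrans_getgeo() above simply
 * forwards to. The values are essentially arbitrary; a layer with a
 * 512-byte blksize (so dev->size counts 512-byte sectors) might pick a
 * fake heads/sectors pair and derive the cylinder count from the
 * capacity:
 *
 *	static int example_getgeo(struct mtd_blktrans_dev *dev,
 *				  struct hd_geometry *geo)
 *	{
 *		geo->heads = 4;
 *		geo->sectors = 16;
 *		geo->cylinders = dev->size / (4 * 16);
 *		return 0;
 *	}
 */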

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct list_head *this;
	int last_devnum = -1;
	struct gendisk *gd;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_for_each(this, &tr->devs) {
		struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	if ((new->devnum << tr->part_bits) > 256) {
		return -EBUSY;
	}

	mutex_init(&new->lock);
	list_add_tail(&new->list, &tr->devs);
 added:
	if (!tr->writesect)
		new->readonly = 1;

	gd = alloc_disk(1 << tr->part_bits);
	if (!gd) {
		list_del(&new->list);
		return -ENOMEM;
	}
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	/* 2.5 has capacity in units of 512 bytes while still
	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
	set_capacity(gd, (new->size * tr->blksize) >> 9);

	gd->private_data = new;
	new->blkcore_priv = gd;
	gd->queue = tr->blkcore_priv->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	del_gendisk(old->blkcore_priv);
	put_disk(old->blkcore_priv);

	return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct list_head *this, *this2, *next;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		list_for_each_safe(this2, next, &tr->devs) {
			struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);

			if (dev->mtd == mtd)
				tr->remove_dev(dev);
		}
	}
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct list_head *this;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		tr->add_mtd(tr, mtd);
	}
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	int ret, i;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from tripping
	   us up. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
	if (!tr->blkcore_priv)
		return -ENOMEM;

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}
	spin_lock_init(&tr->blkcore_priv->queue_lock);

	tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
	if (!tr->blkcore_priv->rq) {
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return -ENOMEM;
	}

	tr->blkcore_priv->rq->queuedata = tr;
	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
	tr->blkshift = ffs(tr->blksize) - 1;

	tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
					       "%sd", tr->name);
	if (IS_ERR(tr->blkcore_priv->thread)) {
		blk_cleanup_queue(tr->blkcore_priv->rq);
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return PTR_ERR(tr->blkcore_priv->thread);
	}

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	for (i=0; i<MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd_table[i]);
	}

	mutex_unlock(&mtd_table_mutex);

	return 0;
}
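
/*
 * Illustrative sketch (hypothetical names, modelled on the simpler
 * in-tree translation layers such as mtdblock_ro): register_mtd_blktrans()
 * above invokes tr->add_mtd() for every suitable MTD device. A minimal
 * hook allocates a struct mtd_blktrans_dev, wires it to the MTD device,
 * sets the size in units of tr->blksize, and hands it to
 * add_mtd_blktrans_dev(); remove_dev() undoes it via
 * del_mtd_blktrans_dev():
 *
 *	static void example_add_mtd(struct mtd_blktrans_ops *tr,
 *				    struct mtd_info *mtd)
 *	{
 *		struct mtd_blktrans_dev *dev;
 *
 *		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *		if (!dev)
 *			return;
 *
 *		dev->mtd = mtd;
 *		dev->devnum = mtd->index;
 *		dev->size = mtd->size >> 9;	-- 512-byte blocks
 *		dev->tr = tr;
 *
 *		if (add_mtd_blktrans_dev(dev))
 *			kfree(dev);
 *	}
 *
 *	static void example_remove_dev(struct mtd_blktrans_dev *dev)
 *	{
 *		del_mtd_blktrans_dev(dev);
 *		kfree(dev);
 *	}
 */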

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct list_head *this, *next;

	mutex_lock(&mtd_table_mutex);

	/* Clean up the kernel thread */
	kthread_stop(tr->blkcore_priv->thread);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_safe(this, next, &tr->devs) {
		struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
		tr->remove_dev(dev);
	}

	blk_cleanup_queue(tr->blkcore_priv->rq);
	unregister_blkdev(tr->major, tr->name);

	mutex_unlock(&mtd_table_mutex);

	kfree(tr->blkcore_priv);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
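
/*
 * Illustrative sketch (hypothetical names and major number): putting it
 * all together, a translation layer is just a struct mtd_blktrans_ops
 * plus a module init/exit pair calling register_mtd_blktrans() and
 * deregister_mtd_blktrans(). Assuming the example hooks sketched above,
 * something along these lines:
 *
 *	static struct mtd_blktrans_ops example_tr = {
 *		.name		= "example",
 *		.major		= 240,		-- local/experimental range
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= example_readsect,
 *		.add_mtd	= example_add_mtd,
 *		.remove_dev	= example_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init example_tr_init(void)
 *	{
 *		return register_mtd_blktrans(&example_tr);
 *	}
 *
 *	static void __exit example_tr_exit(void)
 *	{
 *		deregister_mtd_blktrans(&example_tr);
 *	}
 *
 *	module_init(example_tr_init);
 *	module_exit(example_tr_exit);
 *
 * Leaving .writesect unset makes the core mark the device read-only,
 * as add_mtd_blktrans_dev() above shows.
 */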