/*
 * $Id: mtd_blkdevs.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

static LIST_HEAD(blktrans_majors);

extern struct mutex mtd_table_mutex;
extern struct mtd_info *mtd_table[];

struct mtd_blkcore_priv {
	struct task_struct *thread;
	struct request_queue *rq;
	spinlock_t queue_lock;
};

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = req->sector << 9 >> tr->blkshift;
	nsect = req->current_nr_sectors << 9 >> tr->blkshift;

	buf = req->buffer;

	if (!blk_fs_request(req))
		return 0;

	if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
		return 0;

	switch(rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;

	case WRITE:
		if (!tr->writesect)
			return 0;

		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return 0;
	}
}

static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;

		req = elv_next_request(rq);

		if (!req) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	return 0;
}

static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;
	wake_up_process(tr->blkcore_priv->thread);
}


static int blktrans_open(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = -ENODEV;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (!try_module_get(dev->mtd->owner))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
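	/* This bare counter pins the underlying MTD device itself; the
	   try_module_get() calls above only pin the modules, which is
	   why the FIXME above still applies to hotplug removal. */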
	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}

static int blktrans_release(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = 0;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (tr->release)
		ret = tr->release(dev);

	if (!ret) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
		module_put(tr->owner);
	}

	return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

	if (dev->tr->getgeo)
		return dev->tr->getgeo(dev, geo);
	return -ENOTTY;
}

static int blktrans_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;

	switch (cmd) {
	case BLKFLSBUF:
		if (tr->flush)
			return tr->flush(dev);
		/* The core code did the work, we had nothing to do. */
		return 0;
	default:
		return -ENOTTY;
	}
}

struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct list_head *this;
	int last_devnum = -1;
	struct gendisk *gd;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_for_each(this, &tr->devs) {
		struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	if ((new->devnum << tr->part_bits) > 256) {
		return -EBUSY;
	}

	mutex_init(&new->lock);
	list_add_tail(&new->list, &tr->devs);
 added:
	if (!tr->writesect)
		new->readonly = 1;

	gd = alloc_disk(1 << tr->part_bits);
	if (!gd) {
		list_del(&new->list);
		return -ENOMEM;
	}
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	/* 2.5 has capacity in units of 512 bytes while still
	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
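	/* Worked example (illustrative numbers, not from this driver):
	   with tr->blksize == 1024 and new->size == 65536 blocks, the
	   device holds 64MiB, so set_capacity() is handed
	   (65536 * 1024) >> 9 == 131072 512-byte sectors. */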
	set_capacity(gd, (new->size * tr->blksize) >> 9);

	gd->private_data = new;
	new->blkcore_priv = gd;
	gd->queue = tr->blkcore_priv->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	del_gendisk(old->blkcore_priv);
	put_disk(old->blkcore_priv);

	return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct list_head *this, *this2, *next;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		list_for_each_safe(this2, next, &tr->devs) {
			struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);

			if (dev->mtd == mtd)
				tr->remove_dev(dev);
		}
	}
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct list_head *this;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		tr->add_mtd(tr, mtd);
	}
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	int ret, i;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from tripping
	   us up. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
	if (!tr->blkcore_priv)
		return -ENOMEM;

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}
	spin_lock_init(&tr->blkcore_priv->queue_lock);

	tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
	if (!tr->blkcore_priv->rq) {
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return -ENOMEM;
	}

	tr->blkcore_priv->rq->queuedata = tr;
	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
	tr->blkshift = ffs(tr->blksize) - 1;

	tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
					       "%sd", tr->name);
	if (IS_ERR(tr->blkcore_priv->thread)) {
		blk_cleanup_queue(tr->blkcore_priv->rq);
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return PTR_ERR(tr->blkcore_priv->thread);
	}

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	for (i=0; i<MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd_table[i]);
	}

	mutex_unlock(&mtd_table_mutex);

	return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct list_head *this, *next;

	mutex_lock(&mtd_table_mutex);

	/* Clean up the kernel thread */
	kthread_stop(tr->blkcore_priv->thread);
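	/* kthread_stop() only returns once mtd_blktrans_thread() has
	   exited, so nothing is left pulling requests off the queue
	   while the devices and queue are torn down below. */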
	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_safe(this, next, &tr->devs) {
		struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
		tr->remove_dev(dev);
	}

	blk_cleanup_queue(tr->blkcore_priv->rq);
	unregister_blkdev(tr->major, tr->name);

	mutex_unlock(&mtd_table_mutex);

	kfree(tr->blkcore_priv);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
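
/*
 * Example usage, a minimal sketch rather than code from this file: a
 * read-only translation layer built on the four exported calls above,
 * modelled loosely on how the in-tree mtdblock_ro driver uses this
 * interface. All names here (mytl_* and the major number 240) are
 * hypothetical.
 *
 *	static int mytl_readsect(struct mtd_blktrans_dev *dev,
 *				 unsigned long block, char *buf)
 *	{
 *		size_t retlen;
 *
 *		if (dev->mtd->read(dev->mtd, block << 9, 512, &retlen, buf))
 *			return 1;
 *		return 0;
 *	}
 *
 *	static void mytl_add_mtd(struct mtd_blktrans_ops *tr,
 *				 struct mtd_info *mtd)
 *	{
 *		struct mtd_blktrans_dev *dev;
 *
 *		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *		if (!dev)
 *			return;
 *		dev->mtd = mtd;
 *		dev->devnum = -1;		(take the first free number)
 *		dev->size = mtd->size >> 9;	(in tr->blksize units)
 *		dev->tr = tr;
 *		if (add_mtd_blktrans_dev(dev))
 *			kfree(dev);
 *	}
 *
 *	static void mytl_remove_dev(struct mtd_blktrans_dev *dev)
 *	{
 *		del_mtd_blktrans_dev(dev);
 *		kfree(dev);
 *	}
 *
 *	static struct mtd_blktrans_ops mytl_tr = {
 *		.name		= "mytl",
 *		.major		= 240,
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= mytl_readsect,
 *		.add_mtd	= mytl_add_mtd,
 *		.remove_dev	= mytl_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * register_mtd_blktrans(&mytl_tr) then goes in the module's init
 * function and deregister_mtd_blktrans(&mytl_tr) in its exit function;
 * the notifier machinery above calls mytl_add_mtd() for every MTD
 * device already present and for any added later. Since no writesect
 * is provided, add_mtd_blktrans_dev() marks the disk read-only.
 */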