/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
/*
 * aoeblk.c
 * block device routines
 */

#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include "aoe.h"

/* Serializes aoeblk_open() — presumably a BKL-pushdown remnant; the
 * per-device spinlock below it protects the actual state. */
static DEFINE_MUTEX(aoeblk_mutex);
/* Slab cache from which each device's struct buf mempool allocates. */
static struct kmem_cache *buf_pool_cache;

/*
 * sysfs "state": shows "up" or "down", optionally suffixed with
 * ",kickme" (DEVFL_KICKME set) or ",closewait" (device is down but
 * still held open by someone).
 */
static ssize_t aoedisk_show_state(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE,
			"%s%s\n",
			(d->flags & DEVFL_UP) ? "up" : "down",
			(d->flags & DEVFL_KICKME) ? ",kickme" :
			(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
	/* I'd rather see nopen exported so we can ditch closewait */
}

/*
 * sysfs "mac": hardware address of target 0, or "none" when no target
 * has been recorded yet.
 */
static ssize_t aoedisk_show_mac(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct aoetgt *t = d->targets[0];

	if (t == NULL)
		return snprintf(page, PAGE_SIZE, "none\n");
	return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
}

/*
 * sysfs "netif": comma-separated names of the local net_devices used by
 * any of this device's targets, duplicates folded, capped at
 * ARRAY_SIZE(nds) entries; "none" when the list is empty.
 */
static ssize_t aoedisk_show_netif(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct net_device *nds[8], **nd, **nnd, **ne;
	struct aoetgt **t, **te;
	struct aoeif *ifp, *e;
	char *p;

	memset(nds, 0, sizeof nds);
	nd = nds;
	ne = nd + ARRAY_SIZE(nds);
	t = d->targets;
	te = t + NTARGETS;
	/* Walk every target's interface list, collecting each net_device
	 * not already in nds[] (inner nnd scan is the dedup check). */
	for (; t < te && *t; t++) {
		ifp = (*t)->ifs;
		e = ifp + NAOEIFS;
		for (; ifp < e && ifp->nd; ifp++) {
			for (nnd = nds; nnd < nd; nnd++)
				if (*nnd == ifp->nd)
					break;
			if (nnd == nd && nd != ne)
				*nd++ = ifp->nd;
		}
	}

	/* Reuse ne as "end of collected entries" and rewind nd. */
	ne = nd;
	nd = nds;
	if (*nd == NULL)
		return snprintf(page, PAGE_SIZE, "none\n");
	for (p = page; nd < ne; nd++)
		p += snprintf(p, PAGE_SIZE - (p-page), "%s%s",
			p == page ? "" : ",", (*nd)->name);
	p += snprintf(p, PAGE_SIZE - (p-page), "\n");
	return p-page;
}

/* firmware version */
static ssize_t aoedisk_show_fwver(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
}

static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
/*
 * Spelled out by hand: the attribute name contains '-', which the
 * DEVICE_ATTR() macro cannot paste into a C identifier.
 */
static struct device_attribute dev_attr_firmware_version = {
	.attr = { .name = "firmware-version", .mode = S_IRUGO },
	.show = aoedisk_show_fwver,
};

static struct attribute *aoe_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_mac.attr,
	&dev_attr_netif.attr,
	&dev_attr_firmware_version.attr,
	NULL,
};

static const struct attribute_group attr_group = {
	.attrs = aoe_attrs,
};

/* Register the attribute group under the disk's sysfs directory;
 * returns 0 or a negative errno from sysfs_create_group(). */
static int
aoedisk_add_sysfs(struct aoedev *d)
{
	return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}

/* Counterpart of aoedisk_add_sysfs(); called from device teardown. */
void
aoedisk_rm_sysfs(struct aoedev *d)
{
	sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}

/*
 * Open the block device: succeed (and bump d->nopen) only while the
 * device is marked up; otherwise fail with -ENODEV.
 */
static int
aoeblk_open(struct block_device *bdev, fmode_t mode)
{
	struct aoedev *d = bdev->bd_disk->private_data;
	ulong flags;

	mutex_lock(&aoeblk_mutex);
	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_UP) {
		d->nopen++;
		spin_unlock_irqrestore(&d->lock, flags);
		mutex_unlock(&aoeblk_mutex);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	mutex_unlock(&aoeblk_mutex);
	return -ENODEV;
}

/*
 * Close the block device: drop one open reference; when the last
 * reference goes away, issue a config query for this shelf.slot
 * (aoecmd_cfg) after releasing the lock.  Always returns 0.
 */
static int
aoeblk_release(struct gendisk *disk, fmode_t mode)
{
	struct aoedev *d = disk->private_data;
	ulong flags;

	spin_lock_irqsave(&d->lock, flags);

	if (--d->nopen == 0) {
		spin_unlock_irqrestore(&d->lock, flags);
		aoecmd_cfg(d->aoemajor, d->aoeminor);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);

	return 0;
}

/*
 * bio-based request function: wrap the bio in a struct buf, queue it on
 * the device's buffer list, kick the command layer, and transmit any
 * skbs found on d->sendq after dropping the lock.  Always returns 0;
 * failures are reported to the submitter via bio_endio().
 */
static int
aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct buf *buf;
	ulong flags;

	blk_queue_bounce(q, &bio);

	/* Defensive checks: any of these firing indicates a bug above us. */
	if (bio == NULL) {
		printk(KERN_ERR "aoe: bio is NULL\n");
		BUG();
		return 0;
	}
	d = bio->bi_bdev->bd_disk->private_data;
	if (d == NULL) {
		printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	} else if (bio->bi_rw & REQ_HARDBARRIER) {
		/* Barriers are not supported by this driver. */
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	} else if (bio->bi_io_vec == NULL) {
		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	}
	/* GFP_NOIO: we are on the I/O path and must not recurse into it. */
	buf = mempool_alloc(d->bufpool, GFP_NOIO);
	if (buf == NULL) {
		printk(KERN_INFO "aoe: buf allocation failure\n");
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	memset(buf, 0, sizeof(*buf));
	INIT_LIST_HEAD(&buf->bufs);
	buf->stime = jiffies;		/* submission timestamp */
	buf->bio = bio;
	buf->resid = bio->bi_size;	/* bytes still to transfer */
	buf->sector = bio->bi_sector;
	/* Start at the bio's current bvec. */
	buf->bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = buf->bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
	buf->bv_off = buf->bv->bv_offset;

	spin_lock_irqsave(&d->lock, flags);

	if ((d->flags & DEVFL_UP) == 0) {
		pr_info_ratelimited("aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irqrestore(&d->lock, flags);
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -ENXIO);
		return 0;
	}

	list_add_tail(&buf->bufs, &d->bufq);

	aoecmd_work(d);
	/* Splice pending frames off d->sendq under the lock, then send
	 * them after unlocking. */
	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);

	return 0;
}

/* HDIO_GETGEO: copy the cached geometry; -ENODEV while the device is down. */
static int
aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct aoedev *d = bdev->bd_disk->private_data;

	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_ERR "aoe: disk not up\n");
		return -ENODEV;
	}

	geo->cylinders = d->geo.cylinders;
	geo->heads = d->geo.heads;
	geo->sectors = d->geo.sectors;
	return 0;
}

static const struct block_device_operations aoe_bdops = {
	.open = aoeblk_open,
	.release = aoeblk_release,
	.getgeo = aoeblk_getgeo,
	.owner = THIS_MODULE,
};

/* alloc_disk and add_disk can sleep */
/*
 * Allocate and publish the gendisk, buf mempool, and request queue for
 * an aoedev.  Clears DEVFL_GDALLOC on both success and failure; sets
 * DEVFL_UP only on success.  Runs in process context (see above).
 */
void
aoeblk_gdalloc(void *vp)
{
	struct aoedev *d = vp;
	struct gendisk *gd;
	ulong flags;

	gd = alloc_disk(AOE_PARTITIONS);
	if (gd == NULL) {
		printk(KERN_ERR
			"aoe: cannot allocate disk structure for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err;
	}

	/* Pool of struct buf consumed by aoeblk_make_request(). */
	d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache);
	if (d->bufpool == NULL) {
		printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_disk;
	}

	d->blkq = blk_alloc_queue(GFP_KERNEL);
	if (!d->blkq)
		goto err_mempool;
	blk_queue_make_request(d->blkq, aoeblk_make_request);
	d->blkq->backing_dev_info.name = "aoe";
	if (bdi_init(&d->blkq->backing_dev_info))
		goto err_blkq;
	/* Fill in the gendisk and flip the flags atomically w.r.t. d->lock. */
	spin_lock_irqsave(&d->lock, flags);
	gd->major = AOE_MAJOR;
	gd->first_minor = d->sysminor * AOE_PARTITIONS;
	gd->fops = &aoe_bdops;
	gd->private_data = d;
	set_capacity(gd, d->ssize);
	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
		d->aoemajor, d->aoeminor);

	gd->queue = d->blkq;
	d->gd = gd;
	d->flags &= ~DEVFL_GDALLOC;
	d->flags |= DEVFL_UP;

	spin_unlock_irqrestore(&d->lock, flags);

	add_disk(gd);
	aoedisk_add_sysfs(d);
	return;

	/* Error unwinding, innermost acquisition first. */
err_blkq:
	blk_cleanup_queue(d->blkq);
	d->blkq = NULL;
err_mempool:
	/* NOTE(review): d->bufpool is destroyed but not set to NULL here,
	 * unlike d->blkq above — confirm nothing reads it after failure. */
	mempool_destroy(d->bufpool);
err_disk:
	put_disk(gd);
err:
	/* Clear the in-progress flag even on failure. */
	spin_lock_irqsave(&d->lock, flags);
	d->flags &= ~DEVFL_GDALLOC;
	spin_unlock_irqrestore(&d->lock, flags);
}

/* Module teardown: release the global struct buf slab cache. */
void
aoeblk_exit(void)
{
	kmem_cache_destroy(buf_pool_cache);
}

/* Module init: create the global slab cache backing per-device mempools. */
int __init
aoeblk_init(void)
{
	buf_pool_cache = kmem_cache_create("aoe_bufs",
					   sizeof(struct buf),
					   0, 0, NULL);
	if (buf_pool_cache == NULL)
		return -ENOMEM;

	return 0;
}
