/* Copyright (c) 2006 Coraid, Inc. See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <asm/unaligned.h>
#include "aoe.h"

#define TIMERTICK (HZ / 10)
#define MINTIMER (2 * TIMERTICK)
#define MAXTIMER (HZ << 1)

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb->nh.raw = skb->mac.raw = skb->data;
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->priority = 0;
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * or to get the NIC to do it
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}

static struct frame *
getframe(struct aoedev *d, int tag)
{
	struct frame *f, *e;

	f = d->frames;
	e = f + d->nframes;
	for (; f < e; f++)
		if (f->tag == tag)
			return f;
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++d->lasttag & 0x7fff) << 16;
}

static int
aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
	memcpy(h->dst, d->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}

static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}
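/*
 * Illustrative example, not driver logic: with d->lasttag at 0x0123 and
 * the low word of jiffies at 0x5678, newtag() above yields 0x01245678 --
 * send tick 0x5678 in the low 16 bits, sequence 0x0124 in bits 16-30,
 * top bit clear for userland.  put_lba() just peels one byte per field:
 * lba 0x0123456789ab becomes lba0=0xab, lba1=0x89, ..., lba5=0x01.
 */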
static void
aoecmd_ata_rw(struct aoedev *d, struct frame *f)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct sk_buff *skb;
	ulong bcnt;
	register sector_t sector;
	char writebit, extbit;

	writebit = 0x10;
	extbit = 0x4;

	buf = d->inprocess;

	sector = buf->sector;
	bcnt = buf->bv_resid;
	if (bcnt > d->maxbcnt)
		bcnt = d->maxbcnt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb->mac.raw;
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = buf->bufaddr;
	f->bcnt = bcnt;
	f->lba = sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}

	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
			offset_in_page(f->bufaddr), bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
	} else {
		writebit = 0;
	}

	ah->cmdstat = WIN_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bufaddr += bcnt;
	buf->bv_resid -= bcnt;
	/* printk(KERN_DEBUG "aoe: bv_resid=%ld\n", buf->bv_resid); */
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		buf->bv++;
		WARN_ON(buf->bv->bv_len == 0);
		buf->bv_resid = buf->bv->bv_len;
		buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}

/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static struct sk_buff *
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb, *sl, *sl_tail;
	struct net_device *ifp;

	sl = sl_tail = NULL;

	read_lock(&dev_base_lock);
	for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			continue;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			continue;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		if (sl_tail == NULL)
			sl_tail = skb;
		h = (struct aoe_hdr *) skb->mac.raw;
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

		skb->next = sl;
		sl = skb;
	}
	read_unlock(&dev_base_lock);

	if (tail != NULL)
		*tail = sl_tail;
	return sl;
}

static struct frame *
freeframe(struct aoedev *d)
{
	struct frame *f, *e;
	int n = 0;

	f = d->frames;
	e = f + d->nframes;
	for (; f < e; f++) {
		if (f->tag != FREETAG)
			continue;
		if (atomic_read(&skb_shinfo(f->skb)->dataref) == 1) {
			skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
			skb_trim(f->skb, 0);
			return f;
		}
		n++;
	}
	if (n == d->nframes)	/* wait for network layer */
		d->flags |= DEVFL_KICKME;

	return NULL;
}

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;

	if (d->flags & DEVFL_PAUSE) {
		if (!aoedev_isbusy(d))
			d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
					d->aoeminor, &d->sendq_tl);
		return;
	}

loop:
	f = freeframe(d);
	if (f == NULL)
		return;
	if (d->inprocess == NULL) {
		if (list_empty(&d->bufq))
			return;
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
		/* printk(KERN_DEBUG "aoe: bi_size=%ld\n", buf->bio->bi_size); */
		d->inprocess = buf;
	}
	aoecmd_ata_rw(d, f);
	goto loop;
}
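/*
 * Illustrative example, not driver logic: the sendq built above is a bare
 * skb->next chain assembled under d->lock.  After two frames are queued,
 * clone A then clone B, d->sendq_hd == A, A->next == B, d->sendq_tl == B;
 * rexmit_timer() and aoecmd_ata_rsp() detach the whole chain and pass it
 * to aoenet_xmit() only after dropping the lock.
 */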
static void
rexmit(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	char buf[128];
	u32 n;

	n = newtag(d);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
		"retransmit",
		d->aoemajor, d->aoeminor, f->tag, jiffies, n);
	aoechr_error(buf);

	skb = f->skb;
	h = (struct aoe_hdr *) skb->mac.raw;
	ah = (struct aoe_atahdr *) (h+1);
	f->tag = n;
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, d->addr, sizeof h->dst);
	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);

	n = DEFAULTBCNT / 512;
	if (ah->scnt > n) {
		ah->scnt = n;
		if (ah->aflags & AOEAFL_WRITE) {
			skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
				offset_in_page(f->bufaddr), DEFAULTBCNT);
			skb->len = sizeof *h + sizeof *ah + DEFAULTBCNT;
			skb->data_len = DEFAULTBCNT;
		}
		if (++d->lostjumbo > (d->nframes << 1))
			if (d->maxbcnt != DEFAULTBCNT) {
				printk(KERN_INFO "aoe: e%ld.%ld: too many lost jumbo on %s - using 1KB frames.\n",
					d->aoemajor, d->aoeminor, d->ifp->name);
				d->maxbcnt = DEFAULTBCNT;
				d->flags |= DEVFL_MAXBCNT;
			}
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}

static int
tsince(int tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}

static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct frame *f, *e;
	struct sk_buff *sl;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;
	sl = NULL;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	f = d->frames;
	e = f + d->nframes;
	for (; f < e; f++) {
		if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
			n = f->waited += timeout;
			n /= HZ;
			if (n > aoe_deadsecs) { /* waited too long for response */
				aoedev_downdev(d);
				break;
			}
			rexmit(d, f);
		}
	}
	if (d->flags & DEVFL_KICKME) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;
	if (sl) {
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}

/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = d->gd->capacity;
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}
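/*
 * Illustrative example, not driver logic: tsince() works modulo 2^16, so
 * it survives wraparound of the low jiffies word.  A frame stamped at
 * tick 0xfff0 and checked when jiffies & 0xffff == 0x0010 gives
 * 0x0010 - 0xfff0 = -0xffe0, corrected to 0x0020 ticks waited.  With
 * d->rttavg at 40 ticks, rexmit_timer() retransmits anything older than
 * 60 ticks (40 + 40/2).
 */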
static void
ataid_complete(struct aoedev *d, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));

	/* word 86: command set/feature enabled */
	n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
		d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
		d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
	}

	if (d->ssize != ssize)
		printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu sectors\n",
			(unsigned long long)mac_addr(d->addr),
			d->aoemajor, d->aoeminor,
			d->fw_ver, (unsigned long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->gd != NULL) {
		d->gd->capacity = ssize;
		d->flags |= DEVFL_NEWSIZE;
	} else {
		if (d->flags & DEVFL_GDALLOC) {
			printk(KERN_ERR "aoe: can't schedule work for e%lu.%lu, %s\n",
				d->aoemajor, d->aoeminor,
				"it's already on! This shouldn't happen.");
			return;
		}
		d->flags |= DEVFL_GDALLOC;
	}
	schedule_work(&d->work);
}

static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}
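/*
 * Illustrative example, not driver logic: calc_rttavg() is the
 * Jacobson/Karels smoother avg += (sample - avg) * g with gain g = 1/4.
 * With d->rttavg == 40 ticks, an on-time response after 60 ticks moves
 * the average to 40 + (60 - 40)/4 == 45.  A negative rtt marks the
 * "unexpected rsp" path, where |rtt| feeds d->mintimer instead.
 */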
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct frame *f;
	struct buf *buf;
	struct sk_buff *sl;
	register long n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	hin = (struct aoe_hdr *) skb->mac.raw;
	aoemajor = be16_to_cpu(get_unaligned(&hin->major));
	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, hin->minor);
		aoechr_error(ebuf);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&hin->tag));
	f = getframe(d, n);
	if (f == NULL) {
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d tag=%08x@%08lx\n",
			"unexpected rsp",
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor,
			be32_to_cpu(get_unaligned(&hin->tag)),
			jiffies);
		aoechr_error(ebuf);
		return;
	}

	calc_rttavg(d, tsince(f->tag));

	ahin = (struct aoe_atahdr *) (hin+1);
	hout = (struct aoe_hdr *) f->skb->mac.raw;
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;

	if (ahout->cmdstat == WIN_IDENTIFY)
		d->flags &= ~DEVFL_PAUSE;
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		printk(KERN_ERR
			"aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%ld\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		if (buf)
			buf->flags |= BUFFL_FAIL;
	} else {
		n = ahout->scnt << 9;
		switch (ahout->cmdstat) {
		case WIN_READ:
		case WIN_READ_EXT:
			if (skb->len - sizeof *hin - sizeof *ahin < n) {
				printk(KERN_ERR
					"aoe: runt data size in read. skb->len=%d\n",
					skb->len);
				/* fail frame f? just returning will rexmit. */
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			memcpy(f->bufaddr, ahin+1, n);
			/* fall through: reads and writes share the
			 * next-chunk bookkeeping below
			 */
		case WIN_WRITE:
		case WIN_WRITE_EXT:
			if (f->bcnt -= n) {
				skb = f->skb;
				f->bufaddr += n;
				put_lba(ahout, f->lba += ahout->scnt);
				n = f->bcnt;
				if (n > DEFAULTBCNT)
					n = DEFAULTBCNT;
				ahout->scnt = n >> 9;
				if (ahout->aflags & AOEAFL_WRITE) {
					skb_fill_page_desc(skb, 0,
						virt_to_page(f->bufaddr),
						offset_in_page(f->bufaddr), n);
					skb->len = sizeof *hout + sizeof *ahout + n;
					skb->data_len = n;
				}
				f->tag = newtag(d);
				hout->tag = cpu_to_be32(f->tag);
				skb->dev = d->ifp;
				skb = skb_clone(skb, GFP_ATOMIC);
				spin_unlock_irqrestore(&d->lock, flags);
				if (skb)
					aoenet_xmit(skb);
				return;
			}
			if (n > DEFAULTBCNT)
				d->lostjumbo = 0;
			break;
		case WIN_IDENTIFY:
			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
				printk(KERN_INFO
					"aoe: runt data size in ataid. skb->len=%d\n",
					skb->len);
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			ataid_complete(d, (char *) (ahin+1));
			break;
		default:
			printk(KERN_INFO
				"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
				ahout->cmdstat,
				be16_to_cpu(get_unaligned(&hin->major)),
				hin->minor);
		}
	}

	if (buf) {
		buf->nframesout -= 1;
		if (buf->nframesout == 0 && buf->resid == 0) {
			unsigned long duration = jiffies - buf->start_time;
			unsigned long n_sect = buf->bio->bi_size >> 9;
			struct gendisk *disk = d->gd;
			const int rw = bio_data_dir(buf->bio);

			disk_stat_inc(disk, ios[rw]);
			disk_stat_add(disk, ticks[rw], duration);
			disk_stat_add(disk, sectors[rw], n_sect);
			disk_stat_add(disk, io_ticks, duration);
			n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
			bio_endio(buf->bio, buf->bio->bi_size, n);
			mempool_free(buf, d->bufpool);
		}
	}

	f->buf = NULL;
	f->tag = FREETAG;

	aoecmd_work(d);
	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(sl);
}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff *sl;

	sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);

	aoenet_xmit(sl);
}
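/*
 * Illustrative example, not driver logic, assuming the usual 1 KB
 * DEFAULTBCNT: a 16384-byte write sent as one 8704-byte jumbo frame
 * comes back through the response path above, which does
 * f->bcnt -= 8704 (leaving 7680), re-aims the same frame at
 * f->lba += 17 with scnt capped to DEFAULTBCNT/512 == 2 sectors, and
 * retransmits it directly, bypassing the sendq.
 */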
/*
 * Since we only call this in one place (and it only prepares one frame)
 * we just return the skb. Usually we'd chain it up to the aoedev sendq.
 */
static struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;

	f = freeframe(d);
	if (f == NULL) {
		printk(KERN_ERR "aoe: can't get a frame. This shouldn't happen.\n");
		return NULL;
	}

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb->mac.raw;
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = WIN_IDENTIFY;
	ah->lba3 = 0xa0;

	skb->dev = d->ifp;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}

void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	ulong flags, sysminor, aoemajor;
	struct sk_buff *sl;
	enum { MAXFRAMES = 16 };
	u16 n;

	h = (struct aoe_hdr *) skb->mac.raw;
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
			"Check shelf dip switches.\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > MAXFRAMES)	/* keep it reasonable */
		n = MAXFRAMES;

	d = aoedev_by_sysminor_m(sysminor, n);
	if (d == NULL) {
		printk(KERN_INFO "aoe: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	/* permit device to migrate mac and network interface */
	d->ifp = skb->dev;
	memcpy(d->addr, h->src, sizeof d->addr);
	if (!(d->flags & DEVFL_MAXBCNT)) {
		n = d->ifp->mtu;
		n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
		n /= 512;
		if (n > ch->scnt)
			n = ch->scnt;
		n = n ? n * 512 : DEFAULTBCNT;
		if (n != d->maxbcnt) {
			printk(KERN_INFO
				"aoe: e%ld.%ld: setting %d byte data frames on %s\n",
				d->aoemajor, d->aoeminor, n, d->ifp->name);
			d->maxbcnt = n;
		}
	}

	/* don't change users' perspective */
	if (d->nopen && !(d->flags & DEVFL_PAUSE)) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->flags |= DEVFL_PAUSE;	/* force pause */
	d->mintimer = MINTIMER;
	d->fw_ver = be16_to_cpu(ch->fwver);

	/* check for already outstanding ataid */
	sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL;

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}
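/*
 * Illustrative example, not driver logic: the maxbcnt calculation in
 * aoecmd_cfg_rsp() sizes data frames to the path.  With a 1500-byte MTU,
 * (1500 - 36) / 512 == 2 sectors, so 1024-byte payloads; with a
 * 9000-byte jumbo MTU, (9000 - 36) / 512 == 17 sectors, so 8704-byte
 * payloads -- in both cases further capped by the target's advertised
 * ch->scnt.  The 36 assumes the 24-byte AoE and 12-byte ATA headers
 * declared in aoe.h.
 */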