/* Copyright (c) 2006 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include "aoe.h"

#define TIMERTICK (HZ / 10)
#define MINTIMER (2 * TIMERTICK)
#define MAXTIMER (HZ << 1)

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->priority = 0;
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * and not to ask the NIC to do them
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}

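/* return the frame in the device's frame array that carries this tag,
 * or NULL if no outstanding frame matches
 */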
static struct frame *
getframe(struct aoedev *d, int tag)
{
	struct frame *f, *e;

	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++)
		if (f->tag == tag)
			return f;
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++d->lasttag & 0x7fff) << 16;
}

static int
aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
	memcpy(h->dst, d->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}

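/* store a 48-bit LBA into the ATA header, least-significant byte first */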
static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}

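/* build one ATA read/write frame for the buf currently in process and
 * queue a clone of its skb on the per-device send queue
 */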
static void
aoecmd_ata_rw(struct aoedev *d, struct frame *f)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct sk_buff *skb;
	ulong bcnt;
	register sector_t sector;
	char writebit, extbit;

	writebit = 0x10;
	extbit = 0x4;

	buf = d->inprocess;

	sector = buf->sector;
	bcnt = buf->bv_resid;
	if (bcnt > d->maxbcnt)
		bcnt = d->maxbcnt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = buf->bufaddr;
	f->bcnt = bcnt;
	f->lba = sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}

	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
			offset_in_page(f->bufaddr), bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
	} else {
		writebit = 0;
	}

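	/* WIN_READ (0x20) plus writebit (0x10) gives WIN_WRITE (0x30);
	 * extbit (0x4) selects the LBA48 EXT variant of either command
	 */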
	ah->cmdstat = WIN_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bufaddr += bcnt;
	buf->bv_resid -= bcnt;
/* printk(KERN_DEBUG "aoe: bv_resid=%ld\n", buf->bv_resid); */
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		buf->bv++;
		WARN_ON(buf->bv->bv_len == 0);
		buf->bv_resid = buf->bv->bv_len;
		buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}

/* Some callers cannot sleep; they call this function to build the
 * config query packets and transmit them later, when interrupts are on.
 */
static struct sk_buff *
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb, *sl, *sl_tail;
	struct net_device *ifp;

	sl = sl_tail = NULL;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		if (sl_tail == NULL)
			sl_tail = skb;
		h = aoe_hdr(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

		skb->next = sl;
		sl = skb;
cont:
		dev_put(ifp);
	}
	read_unlock(&dev_base_lock);

	if (tail != NULL)
		*tail = sl_tail;
	return sl;
}

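/* find a free frame whose skb the network layer has released
 * (dataref == 1 means only the driver still holds a reference)
 */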
static struct frame *
freeframe(struct aoedev *d)
{
	struct frame *f, *e;
	int n = 0;

	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++) {
		if (f->tag != FREETAG)
			continue;
		if (atomic_read(&skb_shinfo(f->skb)->dataref) == 1) {
			skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
			skb_trim(f->skb, 0);
			return f;
		}
		n++;
	}
	if (n == d->nframes)	/* wait for network layer */
		d->flags |= DEVFL_KICKME;

	return NULL;
}

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;

	if (d->flags & DEVFL_PAUSE) {
		if (!aoedev_isbusy(d))
			d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
						d->aoeminor, &d->sendq_tl);
		return;
	}

loop:
	f = freeframe(d);
	if (f == NULL)
		return;
	if (d->inprocess == NULL) {
		if (list_empty(&d->bufq))
			return;
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
/*printk(KERN_DEBUG "aoe: bi_size=%ld\n", buf->bio->bi_size); */
		d->inprocess = buf;
	}
	aoecmd_ata_rw(d, f);
	goto loop;
}

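/* give a timed-out frame a fresh tag and requeue it; after too many
 * lost jumbo frames, fall back to DEFAULTBCNT-byte data payloads
 */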
static void
rexmit(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	char buf[128];
	u32 n;

	n = newtag(d);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
		"retransmit",
		d->aoemajor, d->aoeminor, f->tag, jiffies, n);
	aoechr_error(buf);

	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	f->tag = n;
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, d->addr, sizeof h->dst);
	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);

	n = DEFAULTBCNT / 512;
	if (ah->scnt > n) {
		ah->scnt = n;
		if (ah->aflags & AOEAFL_WRITE) {
			skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
				offset_in_page(f->bufaddr), DEFAULTBCNT);
			skb->len = sizeof *h + sizeof *ah + DEFAULTBCNT;
			skb->data_len = DEFAULTBCNT;
		}
		if (++d->lostjumbo > (d->nframes << 1) &&
		    d->maxbcnt != DEFAULTBCNT) {
			printk(KERN_INFO "aoe: e%ld.%ld: too many lost jumbo on %s - using 1KB frames.\n",
				d->aoemajor, d->aoeminor, d->ifp->name);
			d->maxbcnt = DEFAULTBCNT;
			d->flags |= DEVFL_MAXBCNT;
		}
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}

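/* ticks elapsed since a tag's low 16 bits were stamped from jiffies */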
static int
tsince(int tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}

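/* per-device timer: retransmit stale frames, give up on the device
 * after aoe_deadsecs, and back off rttavg while traffic is queued
 */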
static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct frame *f, *e;
	struct sk_buff *sl;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;
	sl = NULL;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++) {
		if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
			n = f->waited += timeout;
			n /= HZ;
			if (n > aoe_deadsecs) { /* waited too long for response */
				aoedev_downdev(d);
				break;
			}
			rexmit(d, f);
		}
	}
	if (d->flags & DEVFL_KICKME) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;
	if (sl) {
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}

/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = d->gd->capacity;
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}

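/* process an ATA identify-device response: set LBA48 or LBA28 capacity
 * and geometry, then schedule gendisk allocation or resize
 */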
static void
ataid_complete(struct aoedev *d, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));

	/* word 86: command set/feature enabled */
	n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
		d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
		d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
	}

	if (d->ssize != ssize)
		printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu sectors\n",
			(unsigned long long)mac_addr(d->addr),
			d->aoemajor, d->aoeminor,
			d->fw_ver, (unsigned long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->gd != NULL) {
		d->gd->capacity = ssize;
		d->flags |= DEVFL_NEWSIZE;
	} else {
		if (d->flags & DEVFL_GDALLOC) {
			printk(KERN_ERR "aoe: can't schedule work for e%lu.%lu, %s\n",
			       d->aoemajor, d->aoeminor,
			       "it's already on!  This shouldn't happen.");
			return;
		}
		d->flags |= DEVFL_GDALLOC;
	}
	schedule_work(&d->work);
}

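/* Jacobson/Karels-style RTT estimator.  A negative rtt marks a response
 * whose frame is no longer outstanding; it only adjusts mintimer.
 */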
static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}

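/* handle an inbound ATA response: match it to its frame, update the RTT
 * estimate, continue multi-frame transfers, and complete finished bios
 */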
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct frame *f;
	struct buf *buf;
	struct sk_buff *sl;
	register long n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	hin = aoe_hdr(skb);
	aoemajor = be16_to_cpu(get_unaligned(&hin->major));
	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			 aoemajor, hin->minor);
		aoechr_error(ebuf);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&hin->tag));
	f = getframe(d, n);
	if (f == NULL) {
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d    tag=%08x@%08lx\n",
			"unexpected rsp",
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor,
			be32_to_cpu(get_unaligned(&hin->tag)),
			jiffies);
		aoechr_error(ebuf);
		return;
	}

	calc_rttavg(d, tsince(f->tag));

	ahin = (struct aoe_atahdr *) (hin+1);
	hout = aoe_hdr(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;

	if (ahout->cmdstat == WIN_IDENTIFY)
		d->flags &= ~DEVFL_PAUSE;
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		printk(KERN_ERR
			"aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%ld\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		if (buf)
			buf->flags |= BUFFL_FAIL;
	} else {
		n = ahout->scnt << 9;
		switch (ahout->cmdstat) {
		case WIN_READ:
		case WIN_READ_EXT:
			if (skb->len - sizeof *hin - sizeof *ahin < n) {
				printk(KERN_ERR
					"aoe: runt data size in read.  skb->len=%d\n",
					skb->len);
				/* fail frame f?  just returning will rexmit. */
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			memcpy(f->bufaddr, ahin+1, n);
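			/* fall through: reads continue below with the
			 * same multi-frame logic as writes
			 */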
		case WIN_WRITE:
		case WIN_WRITE_EXT:
			if (f->bcnt -= n) {
				skb = f->skb;
				f->bufaddr += n;
				put_lba(ahout, f->lba += ahout->scnt);
				n = f->bcnt;
				if (n > DEFAULTBCNT)
					n = DEFAULTBCNT;
				ahout->scnt = n >> 9;
				if (ahout->aflags & AOEAFL_WRITE) {
					skb_fill_page_desc(skb, 0,
						virt_to_page(f->bufaddr),
						offset_in_page(f->bufaddr), n);
					skb->len = sizeof *hout + sizeof *ahout + n;
					skb->data_len = n;
				}
				f->tag = newtag(d);
				hout->tag = cpu_to_be32(f->tag);
				skb->dev = d->ifp;
				skb = skb_clone(skb, GFP_ATOMIC);
				spin_unlock_irqrestore(&d->lock, flags);
				if (skb)
					aoenet_xmit(skb);
				return;
			}
			if (n > DEFAULTBCNT)
				d->lostjumbo = 0;
			break;
		case WIN_IDENTIFY:
			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
				printk(KERN_INFO
					"aoe: runt data size in ataid.  skb->len=%d\n",
					skb->len);
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			ataid_complete(d, (char *) (ahin+1));
			break;
		default:
			printk(KERN_INFO
				"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
				ahout->cmdstat,
				be16_to_cpu(get_unaligned(&hin->major)),
				hin->minor);
		}
	}

	if (buf) {
		buf->nframesout -= 1;
		if (buf->nframesout == 0 && buf->resid == 0) {
			unsigned long duration = jiffies - buf->start_time;
			unsigned long n_sect = buf->bio->bi_size >> 9;
			struct gendisk *disk = d->gd;
			const int rw = bio_data_dir(buf->bio);

			disk_stat_inc(disk, ios[rw]);
			disk_stat_add(disk, ticks[rw], duration);
			disk_stat_add(disk, sectors[rw], n_sect);
			disk_stat_add(disk, io_ticks, duration);
			n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
			bio_endio(buf->bio, n);
			mempool_free(buf, d->bufpool);
		}
	}

	f->buf = NULL;
	f->tag = FREETAG;

	aoecmd_work(d);
	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(sl);
}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff *sl;

	sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);

	aoenet_xmit(sl);
}

/*
 * Since we only call this in one place (and it only prepares one frame)
 * we just return the skb.  Usually we'd chain it up to the aoedev sendq.
 */
static struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;

	f = freeframe(d);
	if (f == NULL) {
		printk(KERN_ERR "aoe: can't get a frame. This shouldn't happen.\n");
		return NULL;
	}

	/* initialize the headers & frame */
	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = WIN_IDENTIFY;
	ah->lba3 = 0xa0;

	skb->dev = d->ifp;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}

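/* handle an AoE config/query response: note the device's current MAC and
 * interface, size data frames to the path MTU, and start ATA identify
 */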
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	ulong flags, sysminor, aoemajor;
	struct sk_buff *sl;
	enum { MAXFRAMES = 16 };
	u16 n;

	h = aoe_hdr(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
			"Check shelf dip switches.\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > MAXFRAMES)	/* keep it reasonable */
		n = MAXFRAMES;

	d = aoedev_by_sysminor_m(sysminor, n);
	if (d == NULL) {
		printk(KERN_INFO "aoe: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	/* permit device to migrate mac and network interface */
	d->ifp = skb->dev;
	memcpy(d->addr, h->src, sizeof d->addr);
	if (!(d->flags & DEVFL_MAXBCNT)) {
		n = d->ifp->mtu;
		n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
		n /= 512;
		if (n > ch->scnt)
			n = ch->scnt;
		n = n ? n * 512 : DEFAULTBCNT;
		if (n != d->maxbcnt) {
			printk(KERN_INFO
				"aoe: e%ld.%ld: setting %d byte data frames on %s\n",
				d->aoemajor, d->aoeminor, n, d->ifp->name);
			d->maxbcnt = n;
		}
	}

	/* don't change users' perspective */
	if (d->nopen && !(d->flags & DEVFL_PAUSE)) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->flags |= DEVFL_PAUSE;	/* force pause */
	d->mintimer = MINTIMER;
	d->fw_ver = be16_to_cpu(ch->fwver);

	/* check for already outstanding ataid */
	sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL;

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}
801