xref: /linux/drivers/infiniband/hw/qib/qib_init.c (revision 4e0ae876f77bc01a7e77724dea57b4b82bd53244)
/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_mad.h"
#ifdef CONFIG_DEBUG_FS
#include "qib_debugfs.h"
#include "qib_verbs.h"
#endif

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt

/*
 * Minimum number of buffers we want to have per user context, beyond
 * those the driver itself uses.
 */
#define QIB_MIN_USER_CTXT_BUFCNT 7

#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)

/*
 * Number of ctxts we are configured to use (to allow for more pio
 * buffers per ctxt, etc.)  Zero means use chip value.
 */
ushort qib_cfgctxts;
module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");

unsigned qib_numa_aware;
module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO);
MODULE_PARM_DESC(numa_aware,
	"0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process");

/*
 * If set, do not write to any regs if avoidable, hack to allow
 * check for deranged default register values.
 */
ushort qib_mini_init;
module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");

unsigned qib_n_krcv_queues;
module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");

unsigned qib_cc_table_size;
module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
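
/*
 * Illustrative use of the parameters above (a sketch, assuming the
 * usual ib_qib module name): "modprobe ib_qib cfgctxts=16 krcvqs=2
 * cc_table_size=128" would cap the configured contexts at 16, use two
 * kernel receive queues per port, and enable congestion control with
 * the minimum table size.
 */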

static void verify_interrupt(struct timer_list *);

static struct idr qib_unit_table;
u32 qib_cpulist_count;
unsigned long *qib_cpulist;

/* set number of contexts we'll actually use */
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
	if (!qib_cfgctxts) {
		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
		if (dd->cfgctxts > dd->ctxtcnt)
			dd->cfgctxts = dd->ctxtcnt;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->cfgctxts = dd->ctxtcnt;
	else if (qib_cfgctxts <= dd->ctxtcnt)
		dd->cfgctxts = qib_cfgctxts;
	else
		dd->cfgctxts = dd->ctxtcnt;
	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
		dd->cfgctxts - dd->first_user_ctxt;
}
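
/*
 * Worked example of the sizing above (hypothetical numbers): on a chip
 * reporting ctxtcnt = 18 with first_user_ctxt = 2 and 8 CPUs online,
 * leaving cfgctxts at 0 gives cfgctxts = min(2 + 8, 18) = 10 and
 * freectxts = 10 - 2 = 8 contexts available to user processes.
 */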

/*
 * Common code for creating the receive context array.
 */
int qib_create_ctxts(struct qib_devdata *dd)
{
	unsigned i;
	int local_node_id = pcibus_to_node(dd->pcidev->bus);

	if (local_node_id < 0)
		local_node_id = numa_node_id();
	dd->assigned_node_id = local_node_id;

	/*
	 * Allocate full ctxtcnt array, rather than just cfgctxts, because
	 * cleanup iterates across all possible ctxts.
	 */
	dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
	if (!dd->rcd)
		return -ENOMEM;

	/* create (one or more) kctxt */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct qib_pportdata *ppd;
		struct qib_ctxtdata *rcd;

		if (dd->skip_kctxt_mask & (1 << i))
			continue;

		ppd = dd->pport + (i % dd->num_pports);

		rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
		if (!rcd) {
			qib_dev_err(dd,
				"Unable to allocate ctxtdata for Kernel ctxt, failing\n");
			kfree(dd->rcd);
			dd->rcd = NULL;
			return -ENOMEM;
		}
		rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
		rcd->seq_cnt = 1;
	}
	return 0;
}

/*
 * Common code for user and kernel context setup.
 */
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
	int node_id)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;

	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id);
	if (rcd) {
		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->node_id = node_id;
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
#ifdef CONFIG_DEBUG_FS
		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
				GFP_KERNEL, node_id);
			if (!rcd->opstats) {
				kfree(rcd);
				qib_dev_err(dd,
					"Unable to allocate per ctxt stats buffer\n");
				return NULL;
			}
		}
#endif
		dd->f_init_ctxt(rcd);

		/*
		 * To avoid wasting a lot of memory, we allocate 32KB chunks
		 * of physically contiguous memory, advance through it until
		 * used up and then allocate more.  Of course, we need
		 * memory to store those extra pointers, now.  32KB seems to
		 * be the most that is "safe" under memory pressure
		 * (creating large files and then copying them over
		 * NFS while doing lots of MPI jobs).  The OOM killer can
		 * get invoked, even though we say we can sleep and this can
		 * cause significant system problems....
		 */
		rcd->rcvegrbuf_size = 0x8000;
		rcd->rcvegrbufs_perchunk =
			rcd->rcvegrbuf_size / dd->rcvegrbufsize;
		rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
			rcd->rcvegrbufs_perchunk - 1) /
			rcd->rcvegrbufs_perchunk;
		rcd->rcvegrbufs_perchunk_shift =
			ilog2(rcd->rcvegrbufs_perchunk);
	}
	return rcd;
}
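
/*
 * Worked example of the chunking above (hypothetical numbers): with
 * rcvegrbuf_size = 0x8000 (32KB) and dd->rcvegrbufsize = 4096,
 * rcvegrbufs_perchunk = 8 (shift 3), so a context with rcvegrcnt = 100
 * needs DIV_ROUND_UP(100, 8) = 13 chunks.
 */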

/*
 * Common code for initializing the physical port structure.
 */
int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
			u8 hw_pidx, u8 port)
{
	int size;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	spin_lock_init(&ppd->sdma_lock);
	spin_lock_init(&ppd->lflags_lock);
	spin_lock_init(&ppd->cc_shadow_lock);
	init_waitqueue_head(&ppd->state_wait);

	timer_setup(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup, 0);

	ppd->qib_wq = NULL;
	ppd->ibport_data.pmastats =
		alloc_percpu(struct qib_pma_counters);
	if (!ppd->ibport_data.pmastats)
		return -ENOMEM;
	ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
	if (!(ppd->ibport_data.rvp.rc_acks) ||
	    !(ppd->ibport_data.rvp.rc_qacks) ||
	    !(ppd->ibport_data.rvp.rc_delayed_comp))
		return -ENOMEM;

	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
		goto bail;

	ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size,
		IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT);

	ppd->cc_max_table_entries =
		ppd->cc_supported_table_entries/IB_CCT_ENTRIES;

	size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
		* IB_CCT_ENTRIES;
	ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries)
		goto bail;

	size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
	ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries)
		goto bail_1;

	size = sizeof(struct cc_table_shadow);
	ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries_shadow)
		goto bail_2;

	size = sizeof(struct ib_cc_congestion_setting_attr);
	ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries_shadow)
		goto bail_3;

	return 0;

bail_3:
	kfree(ppd->ccti_entries_shadow);
	ppd->ccti_entries_shadow = NULL;
bail_2:
	kfree(ppd->congestion_entries);
	ppd->congestion_entries = NULL;
bail_1:
	kfree(ppd->ccti_entries);
	ppd->ccti_entries = NULL;
bail:
	/* User is intentionally disabling the congestion control agent */
	if (!qib_cc_table_size)
		return 0;

	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
		qib_cc_table_size = 0;
		qib_dev_err(dd,
		 "Congestion Control table size %d less than minimum %d for port %d\n",
		 qib_cc_table_size, IB_CCT_MIN_ENTRIES, port);
	}

	qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
		port);
	return 0;
}
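
/*
 * Worked example of the CCA sizing above (hypothetical value): loading
 * with cc_table_size = 200 gives cc_supported_table_entries =
 * min(max(200, 128), 1984) = 200 and, assuming IB_CCT_ENTRIES is the
 * 64-entry block size implied by those 128/1984 bounds,
 * cc_max_table_entries = 200 / 64 = 3 CCT blocks per port.
 */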

static int init_pioavailregs(struct qib_devdata *dd)
{
	int ret, pidx;
	u64 *status_page;

	dd->pioavailregs_dma = dma_alloc_coherent(
		&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
		GFP_KERNEL);
	if (!dd->pioavailregs_dma) {
		qib_dev_err(dd,
			"failed to allocate PIOavail reg area in memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * We really want L2 cache aligned, but for current CPUs of
	 * interest, they are the same.
	 */
	status_page = (u64 *)
		((char *) dd->pioavailregs_dma +
		 ((2 * L1_CACHE_BYTES +
		   dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
	/* device status comes first, for backwards compatibility */
	dd->devstatusp = status_page;
	*status_page++ = 0;
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		dd->pport[pidx].statusp = status_page;
		*status_page++ = 0;
	}

	/*
	 * Setup buffer to hold freeze and other messages, accessible to
	 * apps, following statusp.  This is per-unit, not per port.
	 */
	dd->freezemsg = (char *) status_page;
	*dd->freezemsg = 0;
	/* length of msg buffer is "whatever is left" */
	ret = (char *) status_page - (char *) dd->pioavailregs_dma;
	dd->freezelen = PAGE_SIZE - ret;

	ret = 0;

done:
	return ret;
}
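
/*
 * Resulting layout of the page allocated above (sketch): the pioavail
 * DMA'ed registers at the start, then a cache-aligned device status
 * word, one status word per port, and the freeze-message buffer taking
 * whatever is left of the page.
 */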

/**
 * init_shadow_tids - allocate the shadow TID array
 * @dd: the qlogic_ib device
 *
 * allocate the shadow TID array, so we can qib_munlock previous
 * entries.  It may make more sense to move the pageshadow to the
 * ctxt data structure, so we only allocate memory for ctxts actually
 * in use, since we are at 8k per ctxt now.
 * We don't want failures here to prevent use of the driver/chip,
 * so no return value.
 */
static void init_shadow_tids(struct qib_devdata *dd)
{
	struct page **pages;
	dma_addr_t *addrs;

	pages = vzalloc(array_size(sizeof(struct page *),
				   dd->cfgctxts * dd->rcvtidcnt));
	if (!pages)
		goto bail;

	addrs = vzalloc(array_size(sizeof(dma_addr_t),
				   dd->cfgctxts * dd->rcvtidcnt));
	if (!addrs)
		goto bail_free;

	dd->pageshadow = pages;
	dd->physshadow = addrs;
	return;

bail_free:
	vfree(pages);
bail:
	dd->pageshadow = NULL;
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct qib_devdata *dd)
{
	int ret = 0;

	if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
	     QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
		qib_dev_err(dd,
			"Driver only handles version %d, chip swversion is %d (%llx), failing\n",
			QIB_CHIP_SWVERSION,
			(int)(dd->revision >>
				QLOGIC_IB_R_SOFTWARE_SHIFT) &
				QLOGIC_IB_R_SOFTWARE_MASK,
			(unsigned long long) dd->revision);
		ret = -ENOSYS;
		goto done;
	}

	if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
		qib_devinfo(dd->pcidev, "%s", dd->boardversion);

	spin_lock_init(&dd->pioavail_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->qib_diag_trans_lock);
	spin_lock_init(&dd->eep_st_lock);
	mutex_init(&dd->eep_lock);

	if (qib_mini_init)
		goto done;

	ret = init_pioavailregs(dd);
	init_shadow_tids(dd);

	qib_get_eeprom_info(dd);

	/* setup time (don't start yet) to verify we got interrupt */
	timer_setup(&dd->intrchk_timer, verify_interrupt, 0);
done:
	return ret;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the qlogic_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct qib_devdata *dd)
{
	int i;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_pports; ++i) {
		/*
		 * ctxt == -1 means "all contexts". Only really safe for
		 * _dis_abling things, as here.
		 */
		dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
				  QIB_RCVCTRL_INTRAVAIL_DIS |
				  QIB_RCVCTRL_TAILUPD_DIS, -1);
		/* Redundant across ports for some, but no big deal.  */
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
			QIB_SENDCTRL_AVAIL_DIS);
	}

	return 0;
}

static void enable_chip(struct qib_devdata *dd)
{
	u64 rcvmask;
	int i;

	/*
	 * Enable PIO send, and update of PIOavail regs to memory.
	 */
	for (i = 0; i < dd->num_pports; ++i)
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
			QIB_SENDCTRL_AVAIL_ENB);
	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and inits them.
	 */
	rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
	rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
		  QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		struct qib_ctxtdata *rcd = dd->rcd[i];

		if (rcd)
			dd->f_rcvctrl(rcd->ppd, rcvmask, i);
	}
}

static void verify_interrupt(struct timer_list *t)
{
	struct qib_devdata *dd = from_timer(dd, t, intrchk_timer);
	u64 int_counter;

	if (!dd)
		return; /* being torn down */

	/*
	 * If we don't have a lid or any interrupts, let the user know and
	 * don't bother checking again.
	 */
	int_counter = qib_int_counter(dd) - dd->z_int_counter;
	if (int_counter == 0) {
		if (!dd->f_intr_fallback(dd))
			dev_err(&dd->pcidev->dev,
				"No interrupts detected, not usable.\n");
		else /* re-arm the timer to see if fallback works */
			mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
	}
}

static void init_piobuf_state(struct qib_devdata *dd)
{
	int i, pidx;
	u32 uctxts;

	/*
	 * Ensure all buffers are free, and fifos empty.  Buffers
	 * are common, so only do once for port 0.
	 *
	 * After enable and qib_chg_pioavailkernel so we can safely
	 * enable pioavail updates and PIOENABLE.  After this, packets
	 * are ready and able to go out.
	 */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);

535 	 * If not all sendbufs are used, add the one to each of the lower
536 	 * numbered contexts.  pbufsctxt and lastctxt_piobuf are
537 	 * calculated in chip-specific code because it may cause some
538 	 * chip-specific adjustments to be made.
539 	 */
	uctxts = dd->cfgctxts - dd->first_user_ctxt;
	dd->ctxts_extrabuf = dd->pbufsctxt ?
		dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;

	/*
	 * Set up the shadow copies of the piobufavail registers,
	 * which we compare against the chip registers for now, and
	 * the in memory DMA'ed copies of the registers.
	 * By now pioavail updates to memory should have occurred, so
	 * copy them into our working/shadow registers; this is in
	 * case something went wrong with abort, but mostly to get the
	 * initial values of the generation bit correct.
	 */
	for (i = 0; i < dd->pioavregs; i++) {
		__le64 tmp;

		tmp = dd->pioavailregs_dma[i];
		/*
		 * Don't need to worry about pioavailkernel here
		 * because we will call qib_chg_pioavailkernel() later
		 * in initialization, to busy out buffers as needed.
		 */
		dd->pioavailshadow[i] = le64_to_cpu(tmp);
	}
	while (i < ARRAY_SIZE(dd->pioavailshadow))
		dd->pioavailshadow[i++] = 0; /* for debugging sanity */

	/* after pioavailshadow is setup */
	qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
			       TXCHK_CHG_TYPE_KERN, NULL);
	dd->f_initvl15_bufs(dd);
}
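
/*
 * Worked example of ctxts_extrabuf (hypothetical numbers): with 6 user
 * contexts, pbufsctxt = 16 and lastctxt_piobuf = 100, the division
 * leaves ctxts_extrabuf = 100 - 16 * 6 = 4, so the four lowest user
 * contexts each get one extra PIO buffer.
 */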

/**
 * qib_create_workqueues - create per port workqueues
 * @dd: the qlogic_ib device
 */
static int qib_create_workqueues(struct qib_devdata *dd)
{
	int pidx;
	struct qib_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->qib_wq) {
			char wq_name[8]; /* "qib" + unit(2) + '_' + port(1) + NUL */

			snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
				dd->unit, pidx);
			ppd->qib_wq = alloc_ordered_workqueue(wq_name,
							      WQ_MEM_RECLAIM);
			if (!ppd->qib_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_ordered_workqueue failed for port %d\n",
		pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;
		}
	}
	return -ENOMEM;
}

static void qib_free_pportdata(struct qib_pportdata *ppd)
{
	free_percpu(ppd->ibport_data.pmastats);
	free_percpu(ppd->ibport_data.rvp.rc_acks);
	free_percpu(ppd->ibport_data.rvp.rc_qacks);
	free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
	ppd->ibport_data.pmastats = NULL;
}

/**
 * qib_init - do the actual initialization sequence on the chip
 * @dd: the qlogic_ib device
 * @reinit: reinitializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int qib_init(struct qib_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	u32 portok = 0;
	unsigned i;
	struct qib_ctxtdata *rcd;
	struct qib_pportdata *ppd;
	unsigned long flags;

	/* Set linkstate to unknown, so we can watch for a transition. */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
				 QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKV);
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* Bypass most chip-init, to get to device creation */
	if (qib_mini_init)
		return 0;

	ret = dd->f_late_initreg(dd);
	if (ret)
		goto done;

	/* dd->rcd can be NULL if early init failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = dd->rcd[i];
		if (!rcd)
			continue;

		lastfail = qib_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = qib_setup_eagerbufs(rcd);
		if (lastfail)
			qib_dev_err(dd,
				"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
	}

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		int mtu;

		if (lastfail)
			ret = lastfail;
		ppd = dd->pport + pidx;
		mtu = ib_mtu_enum_to_int(qib_ibmtu);
		if (mtu == -1) {
			mtu = QIB_DEFAULT_MTU;
			qib_ibmtu = 0; /* don't leave invalid value */
		}
		/* set max we can ever have for this driver load */
		ppd->init_ibmaxlen = min(mtu > 2048 ?
					 dd->piosize4k : dd->piosize2k,
					 dd->rcvegrbufsize +
					 (dd->rcvhdrentsize << 2));
		/*
		 * Have to initialize ibmaxlen, but this will normally
		 * change immediately in qib_set_mtu().
		 */
		ppd->ibmaxlen = ppd->init_ibmaxlen;
		qib_set_mtu(ppd, mtu);

		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);

		lastfail = dd->f_bringup_serdes(ppd);
		if (lastfail) {
			qib_devinfo(dd->pcidev,
				 "Failed to bringup IB port %u\n", ppd->port);
			lastfail = -ENETDOWN;
			continue;
		}

		portok++;
	}

	if (!portok) {
		/* none of the ports initialized */
		if (!ret && lastfail)
			ret = lastfail;
		else if (!ret)
			ret = -ENETDOWN;
		/* but continue on, so we can debug cause */
	}

	enable_chip(dd);

	init_piobuf_state(dd);

done:
	if (!ret) {
		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			*ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
				QIB_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
			if (dd->flags & QIB_HAS_SEND_DMA)
				ret = qib_setup_sdma(ppd);
			timer_setup(&ppd->hol_timer, qib_hol_event, 0);
			ppd->hol_state = QIB_HOL_UP;
		}

		/* now we can enable all interrupts from the chip */
		dd->f_set_intr_state(dd, 1);

		/*
		 * Setup to verify we get an interrupt, and fallback
		 * to an alternate if necessary and possible.
		 */
		mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
		/* start stats retrieval timer */
		mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */

int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
{
}

static inline struct qib_devdata *__qib_lookup(int unit)
{
	return idr_find(&qib_unit_table, unit);
}

struct qib_devdata *qib_lookup(int unit)
{
	struct qib_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	dd = __qib_lookup(unit);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void qib_stop_timers(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int pidx;

	if (dd->stats_timer.function)
		del_timer_sync(&dd->stats_timer);
	if (dd->intrchk_timer.function)
		del_timer_sync(&dd->intrchk_timer);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hol_timer.function)
			del_timer_sync(&ppd->hol_timer);
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
		if (ppd->symerr_clear_timer.function)
			del_timer_sync(&ppd->symerr_clear_timer);
	}
}

/**
 * qib_shutdown_device - shut down a device
 * @dd: the qlogic_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.   It does not free any data structures.
 * Everything it does has to be setup again by qib_init(dd, 1)
 */
static void qib_shutdown_device(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	if (dd->flags & QIB_SHUTDOWN)
		return;
	dd->flags |= QIB_SHUTDOWN;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		spin_lock_irq(&ppd->lflags_lock);
		ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKARMED | QIBL_LINKACTIVE |
				 QIBL_LINKV);
		spin_unlock_irq(&ppd->lflags_lock);
		*ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
	}
	dd->flags &= ~QIB_INITTED;

	/* mask interrupts, but not errors */
	dd->f_set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
				   QIB_RCVCTRL_CTXT_DIS |
				   QIB_RCVCTRL_INTRAVAIL_DIS |
				   QIB_RCVCTRL_PKEY_ENB, -1);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_setextled(ppd, 0); /* make sure LEDs are off */

		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);

		dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
				    QIB_SENDCTRL_SEND_DIS);
		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		dd->f_quiet_serdes(ppd);

		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;
		}
		qib_free_pportdata(ppd);
	}
}

/**
 * qib_free_ctxtdata - free a context's allocated data
 * @dd: the qlogic_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after qib_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}
	if (rcd->rcvegrbuf) {
		unsigned e;

		for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
			void *base = rcd->rcvegrbuf[e];
			size_t size = rcd->rcvegrbuf_size;

			dma_free_coherent(&dd->pcidev->dev, size,
					  base, rcd->rcvegrbuf_phys[e]);
		}
		kfree(rcd->rcvegrbuf);
		rcd->rcvegrbuf = NULL;
		kfree(rcd->rcvegrbuf_phys);
		rcd->rcvegrbuf_phys = NULL;
		rcd->rcvegrbuf_chunks = 0;
	}

	kfree(rcd->tid_pg_list);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
#ifdef CONFIG_DEBUG_FS
	kfree(rcd->opstats);
	rcd->opstats = NULL;
#endif
	kfree(rcd);
}

/*
 * Perform a PIO buffer bandwidth write test, to verify proper system
 * configuration.  Even when all the setup calls work, occasionally
 * BIOS or other issues can prevent write combining from working, or
 * can cause other bandwidth problems to the chip.
 *
 * This test simply writes the same buffer over and over again, and
 * measures close to the peak bandwidth to the chip (not testing
 * data bandwidth to the wire).   On chips that use an address-based
 * trigger to send packets to the wire, this is easy.  On chips that
 * use a count to trigger, we want to make sure that the packet doesn't
 * go out on the wire, or trigger flow control checks.
 */
static void qib_verify_pioperf(struct qib_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u32 *addr;
	u64 msecs, emsecs;

	piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
	if (!piobuf) {
		qib_devinfo(dd->pcidev,
			 "No PIObufs for checking perf, skipping\n");
		return;
	}

	/*
	 * Enough to give us a reasonable test, less than piobuf size, and
	 * likely multiple of store buffer length.
	 */
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr)
		goto done;

	preempt_disable();  /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		/* wait until we cross msec boundary */
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	dd->f_set_armlaunch(dd, 0);

	/*
	 * length 0, no dwords actually sent
	 */
	writeq(0, piobuf);
	qib_flush_wc();

	/*
	 * This is only roughly accurate, since even with preempt we
	 * still take interrupts that could take a while.   Running for
	 * >= 5 msec seems to get us "close enough" to accurate values.
	 */
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		qib_pio_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}

	/* 1 GiB/sec, slightly over IB SDR line rate */
	if (lcnt < (emsecs * 1024U))
		qib_dev_err(dd,
			    "Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n",
			    lcnt / (u32) emsecs);

	preempt_enable();

	vfree(addr);

done:
	/* disarm piobuf, so it's available again */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
	qib_sendbuf_done(dd, pbnum);
	dd->f_set_armlaunch(dd, 1);
}
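
/*
 * Back-of-envelope for the check above: each iteration copies
 * cnt = 1024 bytes, so lcnt / emsecs is KiB per millisecond, which is
 * roughly MiB/sec.  Tripping lcnt < emsecs * 1024 therefore means the
 * observed copy rate fell below about 1 GiB/sec.
 */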

void qib_free_devdata(struct qib_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	idr_remove(&qib_unit_table, dd->unit);
	list_del(&dd->list);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_exit(&dd->verbs_dev);
#endif
	free_percpu(dd->int_counter);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

u64 qib_int_counter(struct qib_devdata *dd)
{
	int cpu;
	u64 int_counter = 0;

	for_each_possible_cpu(cpu)
		int_counter += *per_cpu_ptr(dd->int_counter, cpu);
	return int_counter;
}

u64 qib_sps_ints(void)
{
	unsigned long flags;
	struct qib_devdata *dd;
	u64 sps_ints = 0;

	spin_lock_irqsave(&qib_devs_lock, flags);
	list_for_each_entry(dd, &qib_dev_list, list) {
		sps_ints += qib_int_counter(dd);
	}
	spin_unlock_irqrestore(&qib_devs_lock, flags);
	return sps_ints;
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct qib_devdata *dd;
	int ret, nports;

	/* extra is * number of ports */
	nports = extra / sizeof(struct qib_pportdata);
	dd = (struct qib_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						    nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dd->list);

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&qib_devs_lock, flags);

	ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &qib_dev_list);
	}

	spin_unlock_irqrestore(&qib_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		qib_early_err(&pdev->dev,
			      "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		qib_early_err(&pdev->dev,
			      "Could not allocate per-cpu int_counter\n");
		goto bail;
	}

	if (!qib_cpulist_count) {
		u32 count = num_online_cpus();

		qib_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
				      GFP_KERNEL);
		if (qib_cpulist)
			qib_cpulist_count = count;
	}
#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_init(&dd->verbs_dev);
#endif
	return dd;
bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}
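
/*
 * Note on the idr pattern above: idr_preload()/idr_preload_end()
 * bracket the critical section so idr_alloc() can run with GFP_NOWAIT
 * while qib_devs_lock is held; the per-cpu idr layer cache is filled
 * beforehand with GFP_KERNEL, where sleeping is still permitted.
 */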

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void qib_disable_after_error(struct qib_devdata *dd)
{
	if (dd->flags & QIB_INITTED) {
		u32 pidx;

		dd->flags &= ~QIB_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct qib_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & QIB_PRESENT) {
					qib_set_linkstate(ppd,
						QIB_IB_LINKDOWN_DISABLE);
					dd->f_setextled(ppd, 0);
				}
				*ppd->statusp &= ~QIB_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->devstatusp)
		*dd->devstatusp |= QIB_STATUS_HWERROR;
}

static void qib_remove_one(struct pci_dev *);
static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
static void qib_shutdown_one(struct pci_dev *);

#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "

static const struct pci_device_id qib_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, qib_pci_tbl);

static struct pci_driver qib_driver = {
	.name = QIB_DRV_NAME,
	.probe = qib_init_one,
	.remove = qib_remove_one,
	.shutdown = qib_shutdown_one,
	.id_table = qib_pci_tbl,
	.err_handler = &qib_pci_err_handler,
};

#ifdef CONFIG_INFINIBAND_QIB_DCA

static int qib_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call  = qib_notify_dca,
	.next           = NULL,
	.priority       = 0
};

static int qib_notify_dca_device(struct device *device, void *data)
{
	struct qib_devdata *dd = dev_get_drvdata(device);
	unsigned long event = *(unsigned long *)data;

	return dd->f_notify_dca(dd, event);
}

static int qib_notify_dca(struct notifier_block *nb, unsigned long event,
					  void *p)
{
	int rval;

	rval = driver_for_each_device(&qib_driver.driver, NULL,
				      &event, qib_notify_dca_device);
	return rval ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init qib_ib_init(void)
{
	int ret;

	ret = qib_dev_init();
	if (ret)
		goto bail;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&qib_unit_table);

#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_register_notify(&dca_notifier);
#endif
#ifdef CONFIG_DEBUG_FS
	qib_dbg_init();
#endif
	ret = pci_register_driver(&qib_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}

	/* not fatal if it doesn't work */
	if (qib_init_qibfs())
		pr_err("Unable to register ipathfs\n");
	goto bail; /* all OK */

bail_dev:
#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
#ifdef CONFIG_DEBUG_FS
	qib_dbg_exit();
#endif
	idr_destroy(&qib_unit_table);
	qib_dev_cleanup();
bail:
	return ret;
}

module_init(qib_ib_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit qib_ib_cleanup(void)
{
	int ret;

	ret = qib_exit_qibfs();
	if (ret)
		pr_err(
			"Unable to cleanup counter filesystem: error %d\n",
			-ret);

#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&qib_driver);
#ifdef CONFIG_DEBUG_FS
	qib_dbg_exit();
#endif

	qib_cpulist_count = 0;
	kfree(qib_cpulist);

	idr_destroy(&qib_unit_table);
	qib_dev_cleanup();
}

module_exit(qib_ib_cleanup);
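
/*
 * The teardown above is roughly qib_ib_init() in reverse: qibfs is
 * removed first, then the PCI driver is unregistered (invoking
 * qib_remove_one() for each unit), and only afterwards are the idr
 * table and remaining driver-wide state torn down, since the remove
 * callbacks may still reference them.
 */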

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct qib_devdata *dd)
{
	int ctxt;
	int pidx;
	struct qib_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		if (dd->pport[pidx].statusp)
			*dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;

		spin_lock(&dd->pport[pidx].cc_shadow_lock);

		kfree(dd->pport[pidx].congestion_entries);
		dd->pport[pidx].congestion_entries = NULL;
		kfree(dd->pport[pidx].ccti_entries);
		dd->pport[pidx].ccti_entries = NULL;
		kfree(dd->pport[pidx].ccti_entries_shadow);
		dd->pport[pidx].ccti_entries_shadow = NULL;
		kfree(dd->pport[pidx].congestion_entries_shadow);
		dd->pport[pidx].congestion_entries_shadow = NULL;

		spin_unlock(&dd->pport[pidx].cc_shadow_lock);
	}

	qib_disable_wc(dd);

	if (dd->pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->pioavailregs_dma,
				  dd->pioavailregs_phys);
		dd->pioavailregs_dma = NULL;
	}

	if (dd->pageshadow) {
		struct page **tmpp = dd->pageshadow;
		dma_addr_t *tmpd = dd->physshadow;
		int i;

		for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
			int ctxt_tidbase = ctxt * dd->rcvtidcnt;
			int maxtid = ctxt_tidbase + dd->rcvtidcnt;

			for (i = ctxt_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
				qib_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
			}
		}

		dd->pageshadow = NULL;
		vfree(tmpp);
		dd->physshadow = NULL;
		vfree(tmpd);
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 * We acquire lock to be really paranoid that rcd isn't being
	 * accessed from some interrupt-related code (that should not happen,
	 * but best to be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
		struct qib_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		qib_free_ctxtdata(dd, rcd);
	}
	kfree(tmp);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void qib_postinit_cleanup(struct qib_devdata *dd)
{
	/*
	 * Clean up chip-specific stuff.
	 * We check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.  Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->f_cleanup)
		dd->f_cleanup(dd);

	qib_pcie_ddcleanup(dd);

	cleanup_device_data(dd);

	qib_free_devdata(dd);
}

static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret, j, pidx, initfail;
	struct qib_devdata *dd = NULL;

	ret = qib_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	switch (ent->device) {
	case PCI_DEVICE_ID_QLOGIC_IB_6120:
#ifdef CONFIG_PCI_MSI
		dd = qib_init_iba6120_funcs(pdev, ent);
#else
		qib_early_err(&pdev->dev,
			"Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
			ent->device);
		dd = ERR_PTR(-ENODEV);
#endif
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7220:
		dd = qib_init_iba7220_funcs(pdev, ent);
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7322:
		dd = qib_init_iba7322_funcs(pdev, ent);
		break;

	default:
		qib_early_err(&pdev->dev,
			"Failing on unknown Intel deviceid 0x%x\n",
			ent->device);
		ret = -ENODEV;
	}

	if (IS_ERR(dd))
		ret = PTR_ERR(dd);
	if (ret)
		goto bail; /* error already printed */

	ret = qib_create_workqueues(dd);
	if (ret)
		goto bail;

	/* do the generic initialization */
	initfail = qib_init(dd, 0);

	ret = qib_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!qib_mini_init && !initfail && !ret)
		dd->flags |= QIB_INITTED;

	j = qib_device_create(dd);
	if (j)
		qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
	j = qibfs_add(dd);
	if (j)
		qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
			    -j);

	if (qib_mini_init || initfail || ret) {
		qib_stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			dd->f_quiet_serdes(dd->pport + pidx);
		if (qib_mini_init)
			goto bail;
		if (!j) {
			(void) qibfs_remove(dd);
			qib_device_remove(dd);
		}
		if (!ret)
			qib_unregister_ib_device(dd);
		qib_postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail;
	}

	ret = qib_enable_wc(dd);
	if (ret) {
		qib_dev_err(dd,
			"Write combining not enabled (err %d): performance may be poor\n",
			-ret);
		ret = 0;
	}

	qib_verify_pioperf(dd);
bail:
	return ret;
}

static void qib_remove_one(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);
	int ret;

	/* unregister from IB core */
	qib_unregister_ib_device(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	if (!qib_mini_init)
		qib_shutdown_device(dd);

	qib_stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	ret = qibfs_remove(dd);
	if (ret)
		qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
			    -ret);

	qib_device_remove(dd);

	qib_postinit_cleanup(dd);
}

static void qib_shutdown_one(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);

	qib_shutdown_device(dd);
}

/**
 * qib_create_rcvhdrq - create a receive header queue
 * @dd: the qlogic_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	unsigned amt;
	int old_node_id;

	if (!rcd->rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags;

		amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
			    sizeof(u32), PAGE_SIZE);
		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;

		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
			gfp_flags | __GFP_COMP);
		set_dev_node(&dd->pcidev->dev, old_node_id);

		if (!rcd->rcvhdrq) {
			qib_dev_err(dd,
				"attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				amt, rcd->ctxt);
			goto bail;
		}

		if (rcd->ctxt >= dd->first_user_ctxt) {
			rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
			if (!rcd->user_event_mask)
				goto bail_free_hdrq;
		}

		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			set_dev_node(&dd->pcidev->dev, rcd->node_id);
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				gfp_flags);
			set_dev_node(&dd->pcidev->dev, old_node_id);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}

	/* clear for security and sanity on each use */
	memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
	if (rcd->rcvhdrtail_kvaddr)
		memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
	return 0;

bail_free:
	qib_dev_err(dd,
		"attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		rcd->ctxt);
	vfree(rcd->user_event_mask);
	rcd->user_event_mask = NULL;
bail_free_hdrq:
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_phys);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}
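
/*
 * Sizing example for the rcvhdrq above (hypothetical chip values):
 * rcvhdrcnt = 512 entries of rcvhdrentsize = 16 dwords gives
 * 512 * 16 * 4 = 32768 bytes, which ALIGN() leaves at eight 4KB pages.
 */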

/**
 * qib_setup_eagerbufs - allocate eager buffers, for both kernel and user contexts
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous; we do multiple allocation
 * calls.  Otherwise we would get the OOM code involved, by asking for
 * too much per call, with disastrous results on some kernels.
 */
int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	gfp_t gfp_flags;
	int old_node_id;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	egrcnt = rcd->rcvegrcnt;
	egroff = rcd->rcvegr_tid_base;
	egrsize = dd->rcvegrbufsize;

	chunk = rcd->rcvegrbuf_chunks;
	egrperchunk = rcd->rcvegrbufs_perchunk;
	size = rcd->rcvegrbuf_size;
	if (!rcd->rcvegrbuf) {
		rcd->rcvegrbuf =
			kcalloc_node(chunk, sizeof(rcd->rcvegrbuf[0]),
				     GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf)
			goto bail;
	}
	if (!rcd->rcvegrbuf_phys) {
		rcd->rcvegrbuf_phys =
			kmalloc_array_node(chunk,
					   sizeof(rcd->rcvegrbuf_phys[0]),
					   GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf_phys)
			goto bail_rcvegrbuf;
	}
	for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
		if (rcd->rcvegrbuf[e])
			continue;

		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvegrbuf[e] =
			dma_alloc_coherent(&dd->pcidev->dev, size,
					   &rcd->rcvegrbuf_phys[e],
					   gfp_flags);
		set_dev_node(&dd->pcidev->dev, old_node_id);
		if (!rcd->rcvegrbuf[e])
			goto bail_rcvegrbuf_phys;
	}

	rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];

	for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
		unsigned i;

		/* clear for security and sanity on each use */
		memset(rcd->rcvegrbuf[chunk], 0, size);

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->f_put_tid(dd, e + egroff +
					  (u64 __iomem *)
					  ((char __iomem *)
					   dd->kregbase +
					   dd->rcvegrbase),
					  RCVHQ_RCV_TYPE_EAGER, pa);
			pa += egrsize;
		}
		cond_resched(); /* don't hog the cpu */
	}

	return 0;

bail_rcvegrbuf_phys:
	for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
		dma_free_coherent(&dd->pcidev->dev, size,
				  rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
	kfree(rcd->rcvegrbuf_phys);
	rcd->rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(rcd->rcvegrbuf);
	rcd->rcvegrbuf = NULL;
bail:
	return -ENOMEM;
}
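
/*
 * Shape of the programming loop above (illustrative numbers, matching
 * the 32KB/4KB example earlier): with egrcnt = 100 and egrperchunk = 8,
 * chunk 0 programs TIDs 0..7 at pa, pa + egrsize, ..., while the last
 * chunk, 12, programs only the final four TIDs 96..99; every TID goes
 * through dd->f_put_tid() at offset egroff into the eager TID array.
 */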

/*
 * Note: Changes to this routine should be mirrored
 * for the diagnostics routine qib_remap_ioaddr32().
 * There is also related code for VL15 buffers in qib_init_7322_variables().
 * The teardown code that unmaps is in qib_pcie_ddcleanup()
 */
int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
{
	u64 __iomem *qib_kregbase = NULL;
	void __iomem *qib_piobase = NULL;
	u64 __iomem *qib_userbase = NULL;
	u64 qib_kreglen;
	u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
	u64 qib_pio4koffset = dd->piobufbase >> 32;
	u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
	u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
	u64 qib_physaddr = dd->physaddr;
	u64 qib_piolen;
	u64 qib_userlen = 0;

	/*
	 * Free the old mapping because the kernel will try to reuse the
	 * old mapping and not create a new mapping with the
	 * write combining attribute.
	 */
	iounmap(dd->kregbase);
	dd->kregbase = NULL;

	/*
	 * Assumes chip address space looks like:
	 *	- kregs + sregs + cregs + uregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 * or:
	 *	- kregs + sregs + cregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 *	- uregs
	 */
	if (dd->piobcnt4k == 0) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio2klen;
	} else if (qib_pio2koffset < qib_pio4koffset) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
	} else {
		qib_kreglen = qib_pio4koffset;
		qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
	}
	qib_piolen += vl15buflen;
	/* Map just the configured ports (not all hw ports) */
	if (dd->uregbase > qib_kreglen)
		qib_userlen = dd->ureg_align * dd->cfgctxts;

	/* Sanity checks passed, now create the new mappings */
	qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
	if (!qib_kregbase)
		goto bail;

	qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
	if (!qib_piobase)
		goto bail_kregbase;

	if (qib_userlen) {
		qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
					       qib_userlen);
		if (!qib_userbase)
			goto bail_piobase;
	}

	dd->kregbase = qib_kregbase;
	dd->kregend = (u64 __iomem *)
		((char __iomem *) qib_kregbase + qib_kreglen);
	dd->piobase = qib_piobase;
	dd->pio2kbase = (void __iomem *)
		(((char __iomem *) dd->piobase) +
		 qib_pio2koffset - qib_kreglen);
	if (dd->piobcnt4k)
		dd->pio4kbase = (void __iomem *)
			(((char __iomem *) dd->piobase) +
			 qib_pio4koffset - qib_kreglen);
	if (qib_userlen)
		/* ureg will now be accessed relative to dd->userbase */
		dd->userbase = qib_userbase;
	return 0;

bail_piobase:
	iounmap(qib_piobase);
bail_kregbase:
	iounmap(qib_kregbase);
bail:
	return -ENOMEM;
}