Lines matching +full:re +full:- +full:initialization: search hits from the QLogic qib InfiniBand driver's initialization code (drivers/infiniband/hw/qib/qib_init.c), shown in file order and labeled by the function they fall in. Elided source between hits is marked /* ... */; truncated hit lines keep their original "…".

/* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. */
77 "0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process");
93 MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min =…

/* in qib_set_ctxtcnt() */
		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
		if (dd->cfgctxts > dd->ctxtcnt)
			dd->cfgctxts = dd->ctxtcnt;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->cfgctxts = dd->ctxtcnt;
	else if (qib_cfgctxts <= dd->ctxtcnt)
		dd->cfgctxts = qib_cfgctxts;
	/* ... */
		dd->cfgctxts = dd->ctxtcnt;
	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
			dd->cfgctxts - dd->first_user_ctxt;
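
A minimal userspace sketch of the clamping policy above (pick_cfgctxts is a hypothetical helper, not the driver's API): a parameter of 0 means "auto-size from CPU count", a value below the port count falls back to all hardware contexts, and anything else is capped at what the chip provides.

/* sketch: choose a configured context count from a module-style
 * parameter, then derive the free (user-assignable) count */
#include <stdio.h>

static unsigned int pick_cfgctxts(unsigned int param, unsigned int nports,
				  unsigned int hw_ctxts, unsigned int nkernel,
				  unsigned int ncpus)
{
	unsigned int want;

	if (param == 0)			/* auto: kernel ctxts + one per CPU */
		want = nkernel + ncpus;
	else if (param < nports)	/* too small to be useful: use them all */
		want = hw_ctxts;
	else
		want = param;

	return want > hw_ctxts ? hw_ctxts : want;	/* cap at the hardware */
}

int main(void)
{
	unsigned int first_user_ctxt = 2;	/* kernel contexts come first */
	unsigned int cfg = pick_cfgctxts(0, 2, 18, first_user_ctxt, 8);
	unsigned int freectxts = cfg > first_user_ctxt ?
				 cfg - first_user_ctxt : 0;

	printf("cfgctxts=%u freectxts=%u\n", cfg, freectxts);	/* 10 and 8 */
	return 0;
}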

/* in qib_create_ctxts() */
	int local_node_id = pcibus_to_node(dd->pcidev->bus);
	/* ... */
	dd->assigned_node_id = local_node_id;
	/* ... */
	dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
	if (!dd->rcd)
		return -ENOMEM;
	/* ... */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		/* ... */
		if (dd->skip_kctxt_mask & (1 << i))
		/* ... */
		ppd = dd->pport + (i % dd->num_pports);
		/* ... */
		rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
		/* ... */
			kfree(dd->rcd);
			dd->rcd = NULL;
			return -ENOMEM;
		/* ... */
		rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
		rcd->seq_cnt = 1;

/* in qib_create_ctxtdata() */
	struct qib_devdata *dd = ppd->dd;
	/* ... */
		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->node_id = node_id;
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		/* ... */
		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
			/* ... */
			if (!rcd->opstats) {
		/* ... */
		dd->f_init_ctxt(rcd);
		/* ... */
		rcd->rcvegrbuf_size = 0x8000;
		rcd->rcvegrbufs_perchunk =
			rcd->rcvegrbuf_size / dd->rcvegrbufsize;
		rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
			rcd->rcvegrbufs_perchunk - 1) /
			rcd->rcvegrbufs_perchunk;
		rcd->rcvegrbufs_perchunk_shift =
			ilog2(rcd->rcvegrbufs_perchunk);
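
The chunk arithmetic above is DIV_ROUND_UP written out by hand, and the ilog2() of the per-chunk count only gives an exact shift because the 32 KiB chunk size and the buffer size are both powers of two. A standalone sketch with illustrative values (the eager count of 260 is an example, not taken from the hardware):

/* userspace sketch of the eager-buffer chunking math */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int chunk_size = 0x8000;	/* rcvegrbuf_size */
	unsigned int bufsize = 4096;		/* dd->rcvegrbufsize */
	unsigned int egrcnt = 260;		/* rcd->rcvegrcnt (example) */

	unsigned int perchunk = chunk_size / bufsize;		/* 8 */
	unsigned int chunks = DIV_ROUND_UP(egrcnt, perchunk);	/* 33 */
	unsigned int shift = __builtin_ctz(perchunk);		/* ilog2 -> 3 */

	unsigned int e = 21;			/* some eager buffer index */
	printf("chunks=%u, buf %u lives in chunk %u at slot %u\n",
	       chunks, e, e >> shift, e & (perchunk - 1));
	return 0;
}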

/* in qib_init_pportdata() */
	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */
	/* ... */
	spin_lock_init(&ppd->sdma_lock);
	spin_lock_init(&ppd->lflags_lock);
	spin_lock_init(&ppd->cc_shadow_lock);
	init_waitqueue_head(&ppd->state_wait);
	/* ... */
	timer_setup(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup, 0);
	/* ... */
	ppd->qib_wq = NULL;
	ppd->ibport_data.pmastats =
		/* ... */
	if (!ppd->ibport_data.pmastats)
		return -ENOMEM;
	ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
	if (!(ppd->ibport_data.rvp.rc_acks) ||
	    !(ppd->ibport_data.rvp.rc_qacks) ||
	    !(ppd->ibport_data.rvp.rc_delayed_comp))
		return -ENOMEM;
	/* ... */
	ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size,
		/* ... */
	ppd->cc_max_table_entries =
		ppd->cc_supported_table_entries/IB_CCT_ENTRIES;
	/* ... */
	ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries)
	/* ... */
	ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries)
	/* ... */
	ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries_shadow)
	/* ... */
	ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries_shadow)
	/* ... error unwind ... */
	kfree(ppd->ccti_entries_shadow);
	ppd->ccti_entries_shadow = NULL;
	/* ... */
	kfree(ppd->congestion_entries);
	ppd->congestion_entries = NULL;
	/* ... */
	kfree(ppd->ccti_entries);
	ppd->ccti_entries = NULL;

/* in init_pioavailregs() */
	dd->pioavailregs_dma = dma_alloc_coherent(
		&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
		/* ... */
	if (!dd->pioavailregs_dma) {
		/* ... */
		ret = -ENOMEM;
	/* ... */
		((char *) dd->pioavailregs_dma +
		/* ... */
		 dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
	/* ... */
	dd->devstatusp = status_page;
	/* ... */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		dd->pport[pidx].statusp = status_page;
	/* ... */
	 * apps, following statusp. This is per-unit, not per port.
	/* ... */
	dd->freezemsg = (char *) status_page;
	*dd->freezemsg = 0;
	/* ... */
	ret = (char *) status_page - (char *) dd->pioavailregs_dma;
	dd->freezelen = PAGE_SIZE - ret;

/* in init_shadow_tids(): allocate the shadow TID array */
	/* ... */
		dd->cfgctxts * dd->rcvtidcnt));
	/* ... */
		dd->cfgctxts * dd->rcvtidcnt));
	/* ... */
	dd->pageshadow = pages;
	dd->physshadow = addrs;
	/* ... */
	dd->pageshadow = NULL;

/* in loadtime_init(): "Do initialization for device that is only needed on ..." */
	if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
		/* ... */
			(int)(dd->revision >>
		/* ... */
			(unsigned long long) dd->revision);
		ret = -ENOSYS;
	/* ... */
	if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
		qib_devinfo(dd->pcidev, "%s", dd->boardversion);
	/* ... */
	spin_lock_init(&dd->pioavail_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->qib_diag_trans_lock);
	spin_lock_init(&dd->eep_st_lock);
	mutex_init(&dd->eep_lock);
	/* ... */
	timer_setup(&dd->intrchk_timer, verify_interrupt, 0);

/* in init_after_reset(): re-initialize after a reset */
	/*
	 * ... pioavail updates while we re-initialize. This is mostly ...
	 */
	for (i = 0; i < dd->num_pports; ++i) {
		/*
		 * ctxt == -1 means "all contexts". Only really safe for ...
		 */
		dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
			/* ... */
			QIB_RCVCTRL_TAILUPD_DIS, -1);
		/* ... */
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |

/* in enable_chip() */
	for (i = 0; i < dd->num_pports; ++i)
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
	/* ... */
	rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
	/* ... */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		struct qib_ctxtdata *rcd = dd->rcd[i];
		/* ... */
			dd->f_rcvctrl(rcd->ppd, rcvmask, i);

/* in verify_interrupt() */
	int_counter = qib_int_counter(dd) - dd->z_int_counter;
	/* ... */
		if (!dd->f_intr_fallback(dd))
			dev_err(&dd->pcidev->dev,
		/* ... */
		else /* re-arm the timer to see if fallback works */
			mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
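
The fragments above are the body of a "did we ever get an interrupt?" watchdog armed by loadtime_init(). A compressed sketch of the pattern, with my_dev, read_int_counter and try_intx_fallback standing in for the driver's fields and its f_intr_fallback hook (kernel-style, not compilable on its own):

/* timer callback, armed ~0.5 s after init */
static void verify_interrupt(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, intrchk_timer);

	/* interrupts seen since the baseline taken at init time */
	if (read_int_counter(dev) - dev->z_int_counter == 0) {
		if (!try_intx_fallback(dev))	/* e.g. MSI -> INTx */
			dev_err(dev->dev, "still no interrupts, giving up\n");
		else	/* re-arm once to see whether the fallback works */
			mod_timer(&dev->intrchk_timer, jiffies + HZ / 2);
	}
}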

/* in init_piobuf_state() */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);
	/* ... */
	 * calculated in chip-specific code because it may cause some
	 * chip-specific adjustments to be made.
	/* ... */
	uctxts = dd->cfgctxts - dd->first_user_ctxt;
	dd->ctxts_extrabuf = dd->pbufsctxt ?
		dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
	/* ... */
	for (i = 0; i < dd->pioavregs; i++) {
		/* ... */
		tmp = dd->pioavailregs_dma[i];
		/* ... */
		 * in initialization, to busy out buffers as needed.
		/* ... */
		dd->pioavailshadow[i] = le64_to_cpu(tmp);
	/* ... */
	while (i < ARRAY_SIZE(dd->pioavailshadow))
		dd->pioavailshadow[i++] = 0; /* for debugging sanity */
	/* ... */
	qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
	/* ... */
	dd->f_initvl15_bufs(dd);

/* in qib_create_workqueues(): create per port workqueues */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->qib_wq) {
			ppd->qib_wq = alloc_ordered_workqueue("qib%d_%d",
				/* ... */
				dd->unit, pidx);
			if (!ppd->qib_wq)
	/* ... error unwind ... */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;
	/* ... */
	return -ENOMEM;

/* in qib_free_pportdata() */
	free_percpu(ppd->ibport_data.pmastats);
	free_percpu(ppd->ibport_data.rvp.rc_acks);
	free_percpu(ppd->ibport_data.rvp.rc_qacks);
	free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
	ppd->ibport_data.pmastats = NULL;

/*
 * qib_init - do the actual initialization sequence on the chip
 *
 * Do the actual initialization sequence on the chip. This is done ...
 * ... or it's administratively re-enabled.
 * ... without memory allocation, we need to re-write all the chip registers ...
 */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
			/* ... */
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	/* ... */
	/* Bypass most chip-init, to get to device creation */
	/* ... */
	ret = dd->f_late_initreg(dd);
	/* ... */
	/* dd->rcd can be NULL if early init failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * ... re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = dd->rcd[i];
	/* ... */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		/* ... */
		ppd = dd->pport + pidx;
		/* ... */
		if (mtu == -1) {
			/* ... */
		ppd->init_ibmaxlen = min(mtu > 2048 ?
					 dd->piosize4k : dd->piosize2k,
					 dd->rcvegrbufsize +
					 (dd->rcvhdrentsize << 2));
		/* ... */
		ppd->ibmaxlen = ppd->init_ibmaxlen;
		/* ... */
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags |= QIBL_IB_LINK_DISABLED;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		/* ... */
		lastfail = dd->f_bringup_serdes(ppd);
		/* ... */
			qib_devinfo(dd->pcidev,
				"Failed to bringup IB port %u\n", ppd->port);
			lastfail = -ENETDOWN;
	/* ... */
		ret = -ENETDOWN;
	/* ... */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		/* ... */
		*ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
			/* ... */
		if (!ppd->link_speed_enabled)
		/* ... */
		if (dd->flags & QIB_HAS_SEND_DMA)
		/* ... */
		timer_setup(&ppd->hol_timer, qib_hol_event, 0);
		ppd->hol_state = QIB_HOL_UP;
	/* ... */
	dd->f_set_intr_state(dd, 1);
	/* ... */
	mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
	/* ... */
	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
	/* ... */
	/* if ret is non-zero, we probably should do some cleanup here... */

/*
 * These next two routines are placeholders in case we don't have per-arch ...
 */
/* in qib_enable_wc() */
	return -EOPNOTSUPP;

/* in qib_stop_timers(): "... in initialization." */
	if (dd->stats_timer.function)
		del_timer_sync(&dd->stats_timer);
	if (dd->intrchk_timer.function)
		del_timer_sync(&dd->intrchk_timer);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hol_timer.function)
			del_timer_sync(&ppd->hol_timer);
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
		if (ppd->symerr_clear_timer.function)
			del_timer_sync(&ppd->symerr_clear_timer);

/* in qib_shutdown_device(): shut down a device */
	if (dd->flags & QIB_SHUTDOWN)
	/* ... */
	dd->flags |= QIB_SHUTDOWN;
	/* ... */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		/* ... */
		spin_lock_irq(&ppd->lflags_lock);
		ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
			/* ... */
		spin_unlock_irq(&ppd->lflags_lock);
		*ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
	/* ... */
	dd->flags &= ~QIB_INITTED;
	/* ... */
	dd->f_set_intr_state(dd, 0);
	/* ... */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
			/* ... */
			QIB_RCVCTRL_PKEY_ENB, -1);
		/* ... */
		dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
	/* ... */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_setextled(ppd, 0); /* make sure LEDs are off */
		/* ... */
		if (dd->flags & QIB_HAS_SEND_DMA)
		/* ... */
		dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
			/* ... */
		dd->f_quiet_serdes(ppd);
		/* ... */
		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;

/*
 * qib_free_ctxtdata - free a context's allocated data
 * ... re-allocation of context data, because it is called after qib_mutex ...
 */
	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	/* ... */
	if (rcd->rcvegrbuf) {
		/* ... */
		for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
			void *base = rcd->rcvegrbuf[e];
			size_t size = rcd->rcvegrbuf_size;

			dma_free_coherent(&dd->pcidev->dev, size,
					  base, rcd->rcvegrbuf_phys[e]);
		}
		kfree(rcd->rcvegrbuf);
		rcd->rcvegrbuf = NULL;
		kfree(rcd->rcvegrbuf_phys);
		rcd->rcvegrbuf_phys = NULL;
		rcd->rcvegrbuf_chunks = 0;
	/* ... */
	kfree(rcd->tid_pg_list);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	/* ... */
	kfree(rcd->opstats);
	rcd->opstats = NULL;

/*
 * in qib_verify_pioperf(): "... data bandwidth to the wire). On chips
 * that use an address-based ..."
 */
	piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
	/* ... */
		qib_devinfo(dd->pcidev,
	/* ... */
	dd->f_set_armlaunch(dd, 0);
	/* ... */
	emsecs = jiffies_to_msecs(jiffies) - msecs;
	/* ... */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
	/* ... */
	dd->f_set_armlaunch(dd, 1);
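
qib_verify_pioperf() times a burst of copies into a PIO buffer and warns if the result is implausibly slow. A userspace analogue of that measurement, using memcpy() in place of the driver's PIO copy and clock_gettime() in place of jiffies:

/* sketch: time a fixed number of 1 MiB copies and report MiB/s */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

int main(void)
{
	enum { LEN = 1024 * 1024, ITERS = 256 };
	char *src = malloc(LEN), *dst = malloc(LEN);
	struct timespec t0, t1;

	if (!src || !dst)
		return 1;
	memset(src, 0xaa, LEN);

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (int i = 0; i < ITERS; i++)
		memcpy(dst, src, LEN);
	clock_gettime(CLOCK_MONOTONIC, &t1);

	double ms = (t1.tv_sec - t0.tv_sec) * 1e3 +
		    (t1.tv_nsec - t0.tv_nsec) / 1e6;
	printf("dst[0]=0x%02x: copied %d MiB in %.1f ms (%.0f MiB/s)\n",
	       (unsigned char)dst[0], ITERS, ms, ITERS / (ms / 1e3));
	free(src);
	free(dst);
	return 0;
}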

/* in qib_free_devdata() */
	__xa_erase(&qib_dev_table, dd->unit);
	/* ... */
	qib_dbg_ibdev_exit(&dd->verbs_dev);
	/* ... */
	free_percpu(dd->int_counter);
	rvt_dealloc_device(&dd->verbs_dev.rdi);

/* in qib_int_counter() */
		int_counter += *per_cpu_ptr(dd->int_counter, cpu);
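
The line above is the heart of a lockless per-CPU counter: each CPU bumps its own slot and a reader sums all the slots. The surrounding idiom, sketched kernel-style:

static u64 read_counter(u64 __percpu *ctr)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)	/* sum every CPU's private slot */
		sum += *per_cpu_ptr(ctr, cpu);
	return sum;
}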

/*
 * in qib_alloc_devdata(): Allocate our primary per-unit data structure.
 * Must be done via verbs ...
 * "extra" is for chip-specific data.
 */
		return ERR_PTR(-ENOMEM);
	/* ... */
	ret = xa_alloc_irq(&qib_dev_table, &dd->unit, dd, xa_limit_32b,
		/* ... */
		qib_early_err(&pdev->dev,
			"Could not allocate unit ID: error %d\n", -ret);
	/* ... */
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit);
	/* ... */
	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		qib_early_err(&pdev->dev,
			"Could not allocate per-cpu int_counter\n");
	/* ... */
	qib_dbg_ibdev_init(&dd->verbs_dev);
	/* ... error unwind ... */
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
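
The xa_alloc_irq() call above hands out the lowest free unit number and files the device pointer under it in one step. A sketch of the same pattern (my_dev is a hypothetical stand-in for the driver's devdata):

/* ID-allocating XArray, usable where entries are erased from irq context */
static DEFINE_XARRAY_FLAGS(dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static int register_unit(struct my_dev *dev)
{
	/* stores dev at the lowest free index in [0, U32_MAX] */
	return xa_alloc_irq(&dev_table, &dev->unit, dev, xa_limit_32b,
			    GFP_KERNEL);
}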

/* in qib_disable_after_error() */
	if (dd->flags & QIB_INITTED) {
		/* ... */
		dd->flags &= ~QIB_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				/* ... */
				ppd = dd->pport + pidx;
				if (dd->flags & QIB_PRESENT) {
					/* ... */
					dd->f_setextled(ppd, 0);
				/* ... */
				*ppd->statusp &= ~QIB_STATUS_IB_READY;
	/* ... */
	if (dd->devstatusp)
		*dd->devstatusp |= QIB_STATUS_HWERROR;

/* in qib_notify_dca_device() */
	return dd->f_notify_dca(dd, event);

/*
 * in qib_ib_init(): Do all the generic driver unit- and chip-independent
 * memory allocation and initialization.
 */
	/* ... */
		pr_err("Unable to register driver: error %d\n", -ret);

/* in qib_ib_cleanup(): Do the non-unit driver cleanup, memory free, etc. at unload. */
	/* ... */
			-ret);

/* in cleanup_device_data(): this can only be called after a successful initialization */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		if (dd->pport[pidx].statusp)
			*dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;

		spin_lock(&dd->pport[pidx].cc_shadow_lock);

		kfree(dd->pport[pidx].congestion_entries);
		dd->pport[pidx].congestion_entries = NULL;
		kfree(dd->pport[pidx].ccti_entries);
		dd->pport[pidx].ccti_entries = NULL;
		kfree(dd->pport[pidx].ccti_entries_shadow);
		dd->pport[pidx].ccti_entries_shadow = NULL;
		kfree(dd->pport[pidx].congestion_entries_shadow);
		dd->pport[pidx].congestion_entries_shadow = NULL;

		spin_unlock(&dd->pport[pidx].cc_shadow_lock);
	}
	/* ... */
	if (dd->pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->pioavailregs_dma,
				  dd->pioavailregs_phys);
		dd->pioavailregs_dma = NULL;
	}

	if (dd->pageshadow) {
		struct page **tmpp = dd->pageshadow;
		dma_addr_t *tmpd = dd->physshadow;
		/* ... */
		for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
			int ctxt_tidbase = ctxt * dd->rcvtidcnt;
			int maxtid = ctxt_tidbase + dd->rcvtidcnt;
			/* ... */
				dma_unmap_page(&dd->pcidev->dev, tmpd[i],
			/* ... */
		dd->pageshadow = NULL;
		/* ... */
		dd->physshadow = NULL;
	}
	/*
	 * ... accessed from some interrupt-related code (that should not
	 * happen, ...
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
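
The last few lines show a classic teardown idiom: detach the context array from the device under the lock and free it outside the lock, so any interrupt-time reader sees either the old array or NULL, never a freed pointer. The shape of it, sketched (the per-slot free via qib_free_ctxtdata() is inferred from the surrounding code, not shown in the hits):

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;			/* detach under the lock */
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	/* nothing can reach the contexts any more; free at leisure */
	for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++)
		qib_free_ctxtdata(dd, tmp[ctxt]);
	kfree(tmp);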

/* in qib_postinit_cleanup(): "... successful initialization." */
	/*
	 * Clean up chip-specific stuff.
	 * ...
	 */
	if (dd->f_cleanup)
		dd->f_cleanup(dd);

/* in qib_init_one(): "Do device-specific initialization, function table setup, dd ..." */
	switch (ent->device) {
	/* ... */
		qib_early_err(&pdev->dev,
			/* ... */
			ent->device);
		dd = ERR_PTR(-ENODEV);
	/* ... */
		qib_early_err(&pdev->dev,
			/* ... */
			ent->device);
		ret = -ENODEV;
	/* ... */
	/* do the generic initialization */
	/* ... */
	dd->flags |= QIB_INITTED;
	/* ... */
		qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
	/* ... */
			-j);
	/* ... */
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->f_quiet_serdes(dd->pport + pidx);
	/* ... */
		-ret);

/* in qib_remove_one() */
	/* ... */
			-ret);

/* in qib_create_rcvhdrq(): create a receive header queue */
	if (!rcd->rcvhdrq) {
		/* ... */
		amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
			/* ... */
		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
			&rcd->rcvhdrq_phys, GFP_KERNEL);
		set_dev_node(&dd->pcidev->dev, old_node_id);

		if (!rcd->rcvhdrq) {
			/* ... */
				amt, rcd->ctxt);
		/* ... */
		if (rcd->ctxt >= dd->first_user_ctxt) {
			rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
			if (!rcd->user_event_mask)
		/* ... */
		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			set_dev_node(&dd->pcidev->dev, rcd->node_id);
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				/* ... */
			set_dev_node(&dd->pcidev->dev, old_node_id);
			if (!rcd->rcvhdrtail_kvaddr)
			/* ... */
			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
		/* ... */
		rcd->rcvhdrq_size = amt;
	/* ... */
	memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
	if (rcd->rcvhdrtail_kvaddr)
		memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
	/* ... error path ... */
		rcd->ctxt);
	vfree(rcd->user_event_mask);
	rcd->user_event_mask = NULL;
	/* ... */
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_phys);
	rcd->rcvhdrq = NULL;
	/* ... */
	return -ENOMEM;
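
The dev_to_node()/set_dev_node() dance above is how the driver steers dma_alloc_coherent() onto the context's NUMA node, so the receive queue ends up local to the CPU that drains it. Isolated into a helper it looks like this (alloc_on_node is a hypothetical name, the calls are real kernel APIs):

/* allocate DMA-coherent memory on a chosen NUMA node, then restore
 * the device's original node so later allocations are unaffected */
static void *alloc_on_node(struct device *dev, size_t size,
			   dma_addr_t *dma, int node)
{
	int old_node = dev_to_node(dev);
	void *buf;

	set_dev_node(dev, node);
	buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, old_node);
	return buf;
}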

/* in qib_setup_eagerbufs(): allocate eager buffers, both kernel and user contexts */
	struct qib_devdata *dd = rcd->dd;
	/* ... */
	egrcnt = rcd->rcvegrcnt;
	egroff = rcd->rcvegr_tid_base;
	egrsize = dd->rcvegrbufsize;
	/* ... */
	chunk = rcd->rcvegrbuf_chunks;
	egrperchunk = rcd->rcvegrbufs_perchunk;
	size = rcd->rcvegrbuf_size;
	if (!rcd->rcvegrbuf) {
		rcd->rcvegrbuf =
			kcalloc_node(chunk, sizeof(rcd->rcvegrbuf[0]),
				     GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf)
	/* ... */
	if (!rcd->rcvegrbuf_phys) {
		rcd->rcvegrbuf_phys =
			/* ... */
				     sizeof(rcd->rcvegrbuf_phys[0]),
				     GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf_phys)
	/* ... */
	for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
		if (rcd->rcvegrbuf[e])
		/* ... */
		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvegrbuf[e] =
			dma_alloc_coherent(&dd->pcidev->dev, size,
					   &rcd->rcvegrbuf_phys[e],
					   /* ... */
		set_dev_node(&dd->pcidev->dev, old_node_id);
		if (!rcd->rcvegrbuf[e])
	/* ... */
	rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];
	/* ... */
	for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
		/* ... */
		memset(rcd->rcvegrbuf[chunk], 0, size);
		/* ... */
			dd->f_put_tid(dd, e + egroff +
				/* ... */
				      dd->kregbase +
				      dd->rcvegrbase),
	/* ... error unwind ... */
	for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
		dma_free_coherent(&dd->pcidev->dev, size,
				  rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
	kfree(rcd->rcvegrbuf_phys);
	rcd->rcvegrbuf_phys = NULL;
	/* ... */
	kfree(rcd->rcvegrbuf);
	rcd->rcvegrbuf = NULL;
	/* ... */
	return -ENOMEM;

/* in init_chip_wc_pat() */
	u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
	u64 qib_pio4koffset = dd->piobufbase >> 32;
	u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
	u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
	u64 qib_physaddr = dd->physaddr;
	/* ... */
	iounmap(dd->kregbase);
	dd->kregbase = NULL;
	/* ... */
	/*
	 * ...
	 *  - kregs + sregs + cregs + uregs (in any order)
	 *  - piobufs (2K and 4K bufs in either order)
	 * ...
	 *  - kregs + sregs + cregs (in any order)
	 *  - piobufs (2K and 4K bufs in either order)
	 *  - uregs
	 */
	if (dd->piobcnt4k == 0) {
	/* ... */
		qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
	/* ... */
		qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
	/* ... */
	if (dd->uregbase > qib_kreglen)
		qib_userlen = dd->ureg_align * dd->cfgctxts;
	/* ... */
		qib_userbase = ioremap(qib_physaddr + dd->uregbase,
	/* ... */
	dd->kregbase = qib_kregbase;
	dd->kregend = (u64 __iomem *)
		/* ... */
	dd->piobase = qib_piobase;
	dd->pio2kbase = (void __iomem *)
		(((char __iomem *) dd->piobase) +
		 qib_pio2koffset - qib_kreglen);
	if (dd->piobcnt4k)
		dd->pio4kbase = (void __iomem *)
			(((char __iomem *) dd->piobase) +
			 qib_pio4koffset - qib_kreglen);

	/* ureg will now be accessed relative to dd->userbase */
	dd->userbase = qib_userbase;
	/* ... */
	return -ENOMEM;
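
The point of init_chip_wc_pat() is to split the BAR into an uncached mapping for control registers and a write-combining mapping for the PIO send buffers, so CPU stores to the buffers coalesce into efficient bursts. The core of that split, sketched with illustrative names (bar_base and the offsets/lengths stand in for the values computed above; ioremap() and ioremap_wc() are the real kernel APIs):

	void __iomem *regs, *pio;

	regs = ioremap(bar_base, kreg_len);		/* UC: control registers */
	pio = ioremap_wc(bar_base + pio_off, pio_len);	/* WC: PIO send buffers */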