drivers/infiniband/hw/hfi1/init.c: lines matching "re-initialization"

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright(c) 2015 - 2020 Intel Corporation.
50 int num_user_contexts = -1;
53 MODULE_PARM_DESC(num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");
58 MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
77 MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");
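These parameters use the standard module_param_named()/MODULE_PARM_DESC() pairing. A minimal sketch of the idiom with a hypothetical knob (the driver's actual declarations sit on the elided lines above):

/* Hypothetical example of the parameter idiom used above. */
static int example_knob = -1;	/* -1 selects an automatic default */
module_param_named(example_knob, example_knob, int, 0444);	/* read-only via sysfs */
MODULE_PARM_DESC(example_knob, "Example knob; -1 picks a default at load time");

Such knobs are set at module load time, e.g. modprobe hfi1 num_user_contexts=4.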
94 ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd); in hfi1_create_kctxt()
105 rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) | in hfi1_create_kctxt()
111 if (rcd->ctxt == HFI1_CTRL_CTXT) in hfi1_create_kctxt()
112 rcd->flags |= HFI1_CAP_DMA_RTAIL; in hfi1_create_kctxt()
113 rcd->fast_handler = get_dma_rtail_setting(rcd) ? in hfi1_create_kctxt()
119 rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node); in hfi1_create_kctxt()
120 if (!rcd->sc) { in hfi1_create_kctxt()
122 return -ENOMEM; in hfi1_create_kctxt()
124 hfi1_init_ctxt(rcd->sc); in hfi1_create_kctxt()
137 dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd), in hfi1_create_kctxts()
138 GFP_KERNEL, dd->node); in hfi1_create_kctxts()
139 if (!dd->rcd) in hfi1_create_kctxts()
140 return -ENOMEM; in hfi1_create_kctxts()
142 for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) { in hfi1_create_kctxts()
143 ret = hfi1_create_kctxt(dd, dd->pport); in hfi1_create_kctxts()
150 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) in hfi1_create_kctxts()
151 hfi1_free_ctxt(dd->rcd[i]); in hfi1_create_kctxts()
154 kfree(dd->rcd); in hfi1_create_kctxts()
155 dd->rcd = NULL; in hfi1_create_kctxts()
164 kref_init(&rcd->kref); in hfi1_rcd_init()
168 * hfi1_rcd_free - When the reference count reaches zero, clean up.
178 spin_lock_irqsave(&rcd->dd->uctxt_lock, flags); in hfi1_rcd_free()
179 rcd->dd->rcd[rcd->ctxt] = NULL; in hfi1_rcd_free()
180 spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags); in hfi1_rcd_free()
182 hfi1_free_ctxtdata(rcd->dd, rcd); in hfi1_rcd_free()
188 * hfi1_rcd_put - decrement reference for rcd
196 return kref_put(&rcd->kref, hfi1_rcd_free); in hfi1_rcd_put()
202 * hfi1_rcd_get - increment reference for rcd
207 * Return: reflect kref_get_unless_zero(), which returns non-zero on
212 return kref_get_unless_zero(&rcd->kref); in hfi1_rcd_get()
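The three helpers above implement a standard kref lifecycle. A self-contained sketch of the same pattern on a hypothetical object (not the driver's exact code):

#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref kref;	/* starts at 1 after kref_init() */
};

static void obj_release(struct kref *kref)
{
	/* Runs exactly once, when the last reference is dropped. */
	kfree(container_of(kref, struct obj, kref));
}

static struct obj *obj_get(struct obj *o)
{
	/* Fails (returns NULL) if a concurrent final put already ran. */
	return kref_get_unless_zero(&o->kref) ? o : NULL;
}

static void obj_put(struct obj *o)
{
	kref_put(&o->kref, obj_release);
}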
216 * allocate_rcd_index - allocate an rcd index from the rcd array
231 spin_lock_irqsave(&dd->uctxt_lock, flags); in allocate_rcd_index()
232 for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++) in allocate_rcd_index()
233 if (!dd->rcd[ctxt]) in allocate_rcd_index()
236 if (ctxt < dd->num_rcv_contexts) { in allocate_rcd_index()
237 rcd->ctxt = ctxt; in allocate_rcd_index()
238 dd->rcd[ctxt] = rcd; in allocate_rcd_index()
241 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in allocate_rcd_index()
243 if (ctxt >= dd->num_rcv_contexts) in allocate_rcd_index()
244 return -EBUSY; in allocate_rcd_index()
252 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
266 if (ctxt < dd->num_rcv_contexts) in hfi1_rcd_get_by_index_safe()
273 * hfi1_rcd_get_by_index - get by index
289 spin_lock_irqsave(&dd->uctxt_lock, flags); in hfi1_rcd_get_by_index()
290 if (dd->rcd[ctxt]) { in hfi1_rcd_get_by_index()
291 rcd = dd->rcd[ctxt]; in hfi1_rcd_get_by_index()
295 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in hfi1_rcd_get_by_index()
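hfi1_rcd_get_by_index() elevates the reference under uctxt_lock before returning, so a successful caller owns exactly one put. A hedged usage sketch (function name hypothetical):

static void example_use_ctxt(struct hfi1_devdata *dd, u16 ctxt)
{
	struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index(dd, ctxt);

	if (!rcd)
		return;		/* slot empty or context being torn down */

	/* ... rcd cannot be freed while we hold the reference ... */

	hfi1_rcd_put(rcd);	/* may invoke hfi1_rcd_free() */
}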
307 struct hfi1_devdata *dd = ppd->dd; in hfi1_create_ctxtdata()
312 if (dd->rcv_entries.nctxt_extra > in hfi1_create_ctxtdata()
313 dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt) in hfi1_create_ctxtdata()
314 kctxt_ngroups = (dd->rcv_entries.nctxt_extra - in hfi1_create_ctxtdata()
315 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)); in hfi1_create_ctxtdata()
329 INIT_LIST_HEAD(&rcd->qp_wait_list); in hfi1_create_ctxtdata()
331 rcd->ppd = ppd; in hfi1_create_ctxtdata()
332 rcd->dd = dd; in hfi1_create_ctxtdata()
333 rcd->numa_id = numa; in hfi1_create_ctxtdata()
334 rcd->rcv_array_groups = dd->rcv_entries.ngroups; in hfi1_create_ctxtdata()
335 rcd->rhf_rcv_function_map = normal_rhf_rcv_functions; in hfi1_create_ctxtdata()
336 rcd->slow_handler = handle_receive_interrupt; in hfi1_create_ctxtdata()
337 rcd->do_interrupt = rcd->slow_handler; in hfi1_create_ctxtdata()
338 rcd->msix_intr = CCE_NUM_MSIX_VECTORS; in hfi1_create_ctxtdata()
340 mutex_init(&rcd->exp_mutex); in hfi1_create_ctxtdata()
341 spin_lock_init(&rcd->exp_lock); in hfi1_create_ctxtdata()
342 INIT_LIST_HEAD(&rcd->flow_queue.queue_head); in hfi1_create_ctxtdata()
343 INIT_LIST_HEAD(&rcd->rarr_queue.queue_head); in hfi1_create_ctxtdata()
345 hfi1_cdbg(PROC, "setting up context %u", rcd->ctxt); in hfi1_create_ctxtdata()
354 if (ctxt < dd->first_dyn_alloc_ctxt) { in hfi1_create_ctxtdata()
356 base = ctxt * (dd->rcv_entries.ngroups + 1); in hfi1_create_ctxtdata()
357 rcd->rcv_array_groups++; in hfi1_create_ctxtdata()
360 (ctxt * dd->rcv_entries.ngroups); in hfi1_create_ctxtdata()
363 u16 ct = ctxt - dd->first_dyn_alloc_ctxt; in hfi1_create_ctxtdata()
365 base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) + in hfi1_create_ctxtdata()
367 if (ct < dd->rcv_entries.nctxt_extra) { in hfi1_create_ctxtdata()
368 base += ct * (dd->rcv_entries.ngroups + 1); in hfi1_create_ctxtdata()
369 rcd->rcv_array_groups++; in hfi1_create_ctxtdata()
371 base += dd->rcv_entries.nctxt_extra + in hfi1_create_ctxtdata()
372 (ct * dd->rcv_entries.ngroups); in hfi1_create_ctxtdata()
375 rcd->eager_base = base * dd->rcv_entries.group_size; in hfi1_create_ctxtdata()
377 rcd->rcvhdrq_cnt = rcvhdrcnt; in hfi1_create_ctxtdata()
378 rcd->rcvhdrqentsize = hfi1_hdrq_entsize; in hfi1_create_ctxtdata()
379 rcd->rhf_offset = in hfi1_create_ctxtdata()
380 rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32); in hfi1_create_ctxtdata()
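A note on the line above, where C precedence is easy to misread (the division binds before the subtraction):

/*
 * rcvhdrqentsize is counted in 32-bit words, and the receive header
 * flags (RHF) occupy the final 8 bytes (two words) of each entry, so:
 *
 *	rhf_offset = rcvhdrqentsize - sizeof(u64) / sizeof(u32)
 *	           = rcvhdrqentsize - 2
 *
 * e.g. 30 for the default 32-word (128B) entries.
 */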
382 * Simple Eager buffer allocation: we have already pre-allocated in hfi1_create_ctxtdata()
392 max_entries = rcd->rcv_array_groups * in hfi1_create_ctxtdata()
393 dd->rcv_entries.group_size; in hfi1_create_ctxtdata()
395 rcd->egrbufs.count = round_down(rcvtids, in hfi1_create_ctxtdata()
396 dd->rcv_entries.group_size); in hfi1_create_ctxtdata()
397 if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) { in hfi1_create_ctxtdata()
399 rcd->ctxt); in hfi1_create_ctxtdata()
400 rcd->egrbufs.count = MAX_EAGER_ENTRIES; in hfi1_create_ctxtdata()
404 rcd->ctxt, rcd->egrbufs.count); in hfi1_create_ctxtdata()
412 * multiple of dd->rcv_entries.group_size. in hfi1_create_ctxtdata()
414 rcd->egrbufs.buffers = in hfi1_create_ctxtdata()
415 kcalloc_node(rcd->egrbufs.count, in hfi1_create_ctxtdata()
416 sizeof(*rcd->egrbufs.buffers), in hfi1_create_ctxtdata()
418 if (!rcd->egrbufs.buffers) in hfi1_create_ctxtdata()
420 rcd->egrbufs.rcvtids = in hfi1_create_ctxtdata()
421 kcalloc_node(rcd->egrbufs.count, in hfi1_create_ctxtdata()
422 sizeof(*rcd->egrbufs.rcvtids), in hfi1_create_ctxtdata()
424 if (!rcd->egrbufs.rcvtids) in hfi1_create_ctxtdata()
426 rcd->egrbufs.size = eager_buffer_size; in hfi1_create_ctxtdata()
432 if (rcd->egrbufs.size < hfi1_max_mtu) { in hfi1_create_ctxtdata()
433 rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu); in hfi1_create_ctxtdata()
436 rcd->ctxt, rcd->egrbufs.size); in hfi1_create_ctxtdata()
438 rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE; in hfi1_create_ctxtdata()
441 if (ctxt < dd->first_dyn_alloc_ctxt) { in hfi1_create_ctxtdata()
442 rcd->opstats = kzalloc_node(sizeof(*rcd->opstats), in hfi1_create_ctxtdata()
444 if (!rcd->opstats) in hfi1_create_ctxtdata()
458 return -ENOMEM; in hfi1_create_ctxtdata()
462 * hfi1_free_ctxt - free context
477 * Select the largest ccti value over all SLs to determine the intra-packet gap for the link.
485 struct hfi1_devdata *dd = ppd->dd; in set_link_ipg()
502 * This should _never_ happen - rcu_read_lock() is held, in set_link_ipg()
509 u16 ccti = ppd->cca_timer[i].ccti; in set_link_ipg()
515 ccti_limit = cc_state->cct.ccti_limit; in set_link_ipg()
519 cce = cc_state->cct.entries[max_ccti].entry; in set_link_ipg()
525 max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate); in set_link_ipg()
546 ppd = cca_timer->ppd; in cca_timer_fn()
547 sl = cca_timer->sl; in cca_timer_fn()
564 ccti_min = cc_state->cong_setting.entries[sl].ccti_min; in cca_timer_fn()
565 ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer; in cca_timer_fn()
567 spin_lock_irqsave(&ppd->cca_timer_lock, flags); in cca_timer_fn()
569 if (cca_timer->ccti > ccti_min) { in cca_timer_fn()
570 cca_timer->ccti--; in cca_timer_fn()
574 if (cca_timer->ccti > ccti_min) { in cca_timer_fn()
581 spin_unlock_irqrestore(&ppd->cca_timer_lock, flags); in cca_timer_fn()
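cca_timer_fn() is an hrtimer callback; its overall shape, reduced to a hedged sketch with hypothetical names:

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	/*
	 * Rearm by pushing the expiry forward and returning
	 * HRTIMER_RESTART; end the cycle (here: ccti reached its
	 * floor) with HRTIMER_NORESTART.
	 */
	if (example_should_rearm()) {	/* hypothetical predicate */
		hrtimer_forward_now(t, ns_to_ktime(EXAMPLE_PERIOD_NS));
		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}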
596 ppd->dd = dd; in hfi1_init_pportdata()
597 ppd->hw_pidx = hw_pidx; in hfi1_init_pportdata()
598 ppd->port = port; /* IB port number, not index */ in hfi1_init_pportdata()
599 ppd->prev_link_width = LINK_WIDTH_DEFAULT; in hfi1_init_pportdata()
605 ppd->port_vl_xmit_wait_last[i] = 0; in hfi1_init_pportdata()
606 ppd->vl_xmit_flit_cnt[i] = 0; in hfi1_init_pportdata()
611 ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY; in hfi1_init_pportdata()
612 ppd->part_enforce |= HFI1_PART_ENFORCE_IN; in hfi1_init_pportdata()
613 ppd->pkeys[0] = 0x8001; in hfi1_init_pportdata()
615 INIT_WORK(&ppd->link_vc_work, handle_verify_cap); in hfi1_init_pportdata()
616 INIT_WORK(&ppd->link_up_work, handle_link_up); in hfi1_init_pportdata()
617 INIT_WORK(&ppd->link_down_work, handle_link_down); in hfi1_init_pportdata()
618 INIT_WORK(&ppd->freeze_work, handle_freeze); in hfi1_init_pportdata()
619 INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade); in hfi1_init_pportdata()
620 INIT_WORK(&ppd->sma_message_work, handle_sma_message); in hfi1_init_pportdata()
621 INIT_WORK(&ppd->link_bounce_work, handle_link_bounce); in hfi1_init_pportdata()
622 INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link); in hfi1_init_pportdata()
623 INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work); in hfi1_init_pportdata()
624 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); in hfi1_init_pportdata()
626 mutex_init(&ppd->hls_lock); in hfi1_init_pportdata()
627 spin_lock_init(&ppd->qsfp_info.qsfp_lock); in hfi1_init_pportdata()
629 ppd->qsfp_info.ppd = ppd; in hfi1_init_pportdata()
630 ppd->sm_trap_qp = 0x0; in hfi1_init_pportdata()
631 ppd->sa_qp = 0x1; in hfi1_init_pportdata()
633 ppd->hfi1_wq = NULL; in hfi1_init_pportdata()
635 spin_lock_init(&ppd->cca_timer_lock); in hfi1_init_pportdata()
638 hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC, in hfi1_init_pportdata()
640 ppd->cca_timer[i].ppd = ppd; in hfi1_init_pportdata()
641 ppd->cca_timer[i].sl = i; in hfi1_init_pportdata()
642 ppd->cca_timer[i].ccti = 0; in hfi1_init_pportdata()
643 ppd->cca_timer[i].hrtimer.function = cca_timer_fn; in hfi1_init_pportdata()
646 ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT; in hfi1_init_pportdata()
648 spin_lock_init(&ppd->cc_state_lock); in hfi1_init_pportdata()
649 spin_lock_init(&ppd->cc_log_lock); in hfi1_init_pportdata()
651 RCU_INIT_POINTER(ppd->cc_state, cc_state); in hfi1_init_pportdata()
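cc_state is an RCU-protected pointer: set_link_ipg() and cca_timer_fn() read it under rcu_read_lock(), while updaters swap it under cc_state_lock and free the old copy after a grace period. A hedged sketch of the read side:

static void example_read_cc_state(struct hfi1_pportdata *ppd)
{
	struct cc_state *cc_state;

	rcu_read_lock();
	cc_state = rcu_dereference(ppd->cc_state);
	if (cc_state) {
		/* read-only use; stable until rcu_read_unlock() */
	}
	rcu_read_unlock();
}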
661 * Do initialization for device that is only needed on
670 * init_after_reset - re-initialize after a reset
683 * pioavail updates while we re-initialize. This is mostly in init_after_reset()
686 for (i = 0; i < dd->num_rcv_contexts; i++) { in init_after_reset()
694 for (i = 0; i < dd->num_send_contexts; i++) in init_after_reset()
695 sc_disable(dd->send_contexts[i].sc); in init_after_reset()
713 for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) { in enable_chip()
718 rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ? in enable_chip()
720 if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) in enable_chip()
722 if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL)) in enable_chip()
724 if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL)) in enable_chip()
729 sc_enable(rcd->sc); in enable_chip()
735 * create_workqueues - create per port workqueues
743 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in create_workqueues()
744 ppd = dd->pport + pidx; in create_workqueues()
745 if (!ppd->hfi1_wq) { in create_workqueues()
746 ppd->hfi1_wq = in create_workqueues()
752 dd->unit, pidx); in create_workqueues()
753 if (!ppd->hfi1_wq) in create_workqueues()
756 if (!ppd->link_wq) { in create_workqueues()
758 * Make the link workqueue single-threaded to enforce in create_workqueues()
761 ppd->link_wq = in create_workqueues()
766 dd->unit, pidx); in create_workqueues()
767 if (!ppd->link_wq) in create_workqueues()
774 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in create_workqueues()
775 ppd = dd->pport + pidx; in create_workqueues()
776 if (ppd->hfi1_wq) { in create_workqueues()
777 destroy_workqueue(ppd->hfi1_wq); in create_workqueues()
778 ppd->hfi1_wq = NULL; in create_workqueues()
780 if (ppd->link_wq) { in create_workqueues()
781 destroy_workqueue(ppd->link_wq); in create_workqueues()
782 ppd->link_wq = NULL; in create_workqueues()
785 return -ENOMEM; in create_workqueues()
789 * destroy_workqueues - destroy per port workqueues
797 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in destroy_workqueues()
798 ppd = dd->pport + pidx; in destroy_workqueues()
800 if (ppd->hfi1_wq) { in destroy_workqueues()
801 destroy_workqueue(ppd->hfi1_wq); in destroy_workqueues()
802 ppd->hfi1_wq = NULL; in destroy_workqueues()
804 if (ppd->link_wq) { in destroy_workqueues()
805 destroy_workqueue(ppd->link_wq); in destroy_workqueues()
806 ppd->link_wq = NULL; in destroy_workqueues()
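create_workqueues()/destroy_workqueues() follow the usual alloc_workqueue() pairing. A reduced sketch (struct, names, and flags illustrative; the driver's actual flags may differ):

struct example_port {
	struct workqueue_struct *wq;
};

static int example_create_wq(struct example_port *p, int unit)
{
	/* max_active = 1 serializes all items, as for the link queue */
	p->wq = alloc_workqueue("example%d", WQ_MEM_RECLAIM, 1, unit);
	if (!p->wq)
		return -ENOMEM;
	return 0;
}

static void example_destroy_wq(struct example_port *p)
{
	if (p->wq) {
		destroy_workqueue(p->wq);	/* drains queued work first */
		p->wq = NULL;			/* mirrors the driver's NULL guard */
	}
}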
812 * enable_general_intr() - Enable the IRQs that will be handled by the
829 * hfi1_init - do the actual initialization sequence on the chip
831 * @reinit: re-initializing, so don't allocate new memory
833 * Do the actual initialization sequence on the chip. This is done
836 * or it's administratively re-enabled.
840 * without memory allocation, we need to re-write all the chip registers
852 dd->process_pio_send = hfi1_verbs_send_pio; in hfi1_init()
853 dd->process_dma_send = hfi1_verbs_send_dma; in hfi1_init()
854 dd->pio_inline_send = pio_copy; in hfi1_init()
855 dd->process_vnic_dma_send = hfi1_vnic_send_dma; in hfi1_init()
858 atomic_set(&dd->drop_packet, DROP_PACKET_ON); in hfi1_init()
859 dd->do_drop = true; in hfi1_init()
861 atomic_set(&dd->drop_packet, DROP_PACKET_OFF); in hfi1_init()
862 dd->do_drop = false; in hfi1_init()
866 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_init()
867 ppd = dd->pport + pidx; in hfi1_init()
868 ppd->linkup = 0; in hfi1_init()
878 /* dd->rcd can be NULL if early initialization failed */ in hfi1_init()
879 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) { in hfi1_init()
882 * re-init, the simplest way to handle this is to free in hfi1_init()
883 * existing, and re-allocate. in hfi1_init()
884 * Need to re-create rest of ctxt 0 ctxtdata as well. in hfi1_init()
906 sizeof(*dd->events)); in hfi1_init()
907 dd->events = vmalloc_user(len); in hfi1_init()
908 if (!dd->events) in hfi1_init()
914 dd->status = vmalloc_user(PAGE_SIZE); in hfi1_init()
915 if (!dd->status) in hfi1_init()
917 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_init()
918 ppd = dd->pport + pidx; in hfi1_init()
919 if (dd->status) in hfi1_init()
921 ppd->statusp = &dd->status->port; in hfi1_init()
934 if (dd->status) in hfi1_init()
935 dd->status->dev |= HFI1_STATUS_CHIP_PRESENT | in hfi1_init()
943 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_init()
944 ppd = dd->pport + pidx; in hfi1_init()
947 * start the serdes - must be after interrupts are in hfi1_init()
954 ppd->port); in hfi1_init()
960 if (ppd->statusp) in hfi1_init()
961 *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT | in hfi1_init()
963 if (!ppd->link_speed_enabled) in hfi1_init()
968 /* if ret is non-zero, we probably should do some cleanup here... */ in hfi1_init()
979 * in initialization.
986 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in stop_timers()
987 ppd = dd->pport + pidx; in stop_timers()
988 if (ppd->led_override_timer.function) { in stop_timers()
989 del_timer_sync(&ppd->led_override_timer); in stop_timers()
990 atomic_set(&ppd->led_override_timer_active, 0); in stop_timers()
996 * shutdown_device - shut down a device
1011 if (dd->flags & HFI1_SHUTDOWN) in shutdown_device()
1013 dd->flags |= HFI1_SHUTDOWN; in shutdown_device()
1015 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in shutdown_device()
1016 ppd = dd->pport + pidx; in shutdown_device()
1018 ppd->linkup = 0; in shutdown_device()
1019 if (ppd->statusp) in shutdown_device()
1020 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF | in shutdown_device()
1023 dd->flags &= ~HFI1_INITTED; in shutdown_device()
1029 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in shutdown_device()
1030 for (i = 0; i < dd->num_rcv_contexts; i++) { in shutdown_device()
1043 for (i = 0; i < dd->num_send_contexts; i++) in shutdown_device()
1044 sc_flush(dd->send_contexts[i].sc); in shutdown_device()
1053 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in shutdown_device()
1054 ppd = dd->pport + pidx; in shutdown_device()
1057 for (i = 0; i < dd->num_send_contexts; i++) in shutdown_device()
1058 sc_disable(dd->send_contexts[i].sc); in shutdown_device()
1069 if (ppd->hfi1_wq) in shutdown_device()
1070 flush_workqueue(ppd->hfi1_wq); in shutdown_device()
1071 if (ppd->link_wq) in shutdown_device()
1072 flush_workqueue(ppd->link_wq); in shutdown_device()
1078 * hfi1_free_ctxtdata - free a context's allocated data
1092 if (rcd->rcvhdrq) { in hfi1_free_ctxtdata()
1093 dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd), in hfi1_free_ctxtdata()
1094 rcd->rcvhdrq, rcd->rcvhdrq_dma); in hfi1_free_ctxtdata()
1095 rcd->rcvhdrq = NULL; in hfi1_free_ctxtdata()
1097 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, in hfi1_free_ctxtdata()
1099 rcd->rcvhdrqtailaddr_dma); in hfi1_free_ctxtdata()
1100 rcd->rcvhdrtail_kvaddr = NULL; in hfi1_free_ctxtdata()
1105 kfree(rcd->egrbufs.rcvtids); in hfi1_free_ctxtdata()
1106 rcd->egrbufs.rcvtids = NULL; in hfi1_free_ctxtdata()
1108 for (e = 0; e < rcd->egrbufs.alloced; e++) { in hfi1_free_ctxtdata()
1109 if (rcd->egrbufs.buffers[e].addr) in hfi1_free_ctxtdata()
1110 dma_free_coherent(&dd->pcidev->dev, in hfi1_free_ctxtdata()
1111 rcd->egrbufs.buffers[e].len, in hfi1_free_ctxtdata()
1112 rcd->egrbufs.buffers[e].addr, in hfi1_free_ctxtdata()
1113 rcd->egrbufs.buffers[e].dma); in hfi1_free_ctxtdata()
1115 kfree(rcd->egrbufs.buffers); in hfi1_free_ctxtdata()
1116 rcd->egrbufs.alloced = 0; in hfi1_free_ctxtdata()
1117 rcd->egrbufs.buffers = NULL; in hfi1_free_ctxtdata()
1119 sc_free(rcd->sc); in hfi1_free_ctxtdata()
1120 rcd->sc = NULL; in hfi1_free_ctxtdata()
1122 vfree(rcd->subctxt_uregbase); in hfi1_free_ctxtdata()
1123 vfree(rcd->subctxt_rcvegrbuf); in hfi1_free_ctxtdata()
1124 vfree(rcd->subctxt_rcvhdr_base); in hfi1_free_ctxtdata()
1125 kfree(rcd->opstats); in hfi1_free_ctxtdata()
1127 rcd->subctxt_uregbase = NULL; in hfi1_free_ctxtdata()
1128 rcd->subctxt_rcvegrbuf = NULL; in hfi1_free_ctxtdata()
1129 rcd->subctxt_rcvhdr_base = NULL; in hfi1_free_ctxtdata()
1130 rcd->opstats = NULL; in hfi1_free_ctxtdata()
1143 if (!dd->asic_data) in release_asic_data()
1145 dd->asic_data->dds[dd->hfi1_id] = NULL; in release_asic_data()
1146 other = dd->hfi1_id ? 0 : 1; in release_asic_data()
1147 ad = dd->asic_data; in release_asic_data()
1148 dd->asic_data = NULL; in release_asic_data()
1150 return ad->dds[other] ? NULL : ad; in release_asic_data()
1161 * hfi1_free_devdata - cleans up and frees per-unit data structure
1173 __xa_erase(&hfi1_dev_table, dd->unit); in hfi1_free_devdata()
1180 free_percpu(dd->int_counter); in hfi1_free_devdata()
1181 free_percpu(dd->rcv_limit); in hfi1_free_devdata()
1182 free_percpu(dd->send_schedule); in hfi1_free_devdata()
1183 free_percpu(dd->tx_opstats); in hfi1_free_devdata()
1184 dd->int_counter = NULL; in hfi1_free_devdata()
1185 dd->rcv_limit = NULL; in hfi1_free_devdata()
1186 dd->send_schedule = NULL; in hfi1_free_devdata()
1187 dd->tx_opstats = NULL; in hfi1_free_devdata()
1188 kfree(dd->comp_vect); in hfi1_free_devdata()
1189 dd->comp_vect = NULL; in hfi1_free_devdata()
1190 if (dd->rcvhdrtail_dummy_kvaddr) in hfi1_free_devdata()
1191 dma_free_coherent(&dd->pcidev->dev, sizeof(u64), in hfi1_free_devdata()
1192 (void *)dd->rcvhdrtail_dummy_kvaddr, in hfi1_free_devdata()
1193 dd->rcvhdrtail_dummy_dma); in hfi1_free_devdata()
1194 dd->rcvhdrtail_dummy_kvaddr = NULL; in hfi1_free_devdata()
1195 sdma_clean(dd, dd->num_sdma); in hfi1_free_devdata()
1196 rvt_dealloc_device(&dd->verbs_dev.rdi); in hfi1_free_devdata()
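int_counter, rcv_limit, send_schedule and tx_opstats freed above are per-CPU allocations. A hedged sketch of that pattern:

static int example_percpu(void)
{
	u64 __percpu *ctr;
	u64 total = 0;
	int cpu;

	ctr = alloc_percpu(u64);	/* one u64 per possible CPU */
	if (!ctr)
		return -ENOMEM;

	this_cpu_inc(*ctr);		/* lock-free fast-path update */

	for_each_possible_cpu(cpu)	/* slow-path read sums all CPUs */
		total += *per_cpu_ptr(ctr, cpu);

	free_percpu(ctr);
	return 0;
}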
1200 * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
1206 * "extra" is for chip-specific data.
1220 return ERR_PTR(-ENOMEM); in hfi1_alloc_devdata()
1221 dd->num_pports = nports; in hfi1_alloc_devdata()
1222 dd->pport = (struct hfi1_pportdata *)(dd + 1); in hfi1_alloc_devdata()
1223 dd->pcidev = pdev; in hfi1_alloc_devdata()
1226 ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b, in hfi1_alloc_devdata()
1229 dev_err(&pdev->dev, in hfi1_alloc_devdata()
1230 "Could not allocate unit ID: error %d\n", -ret); in hfi1_alloc_devdata()
1233 rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit); in hfi1_alloc_devdata()
1238 dd->node = pcibus_to_node(pdev->bus); in hfi1_alloc_devdata()
1239 if (dd->node == NUMA_NO_NODE) { in hfi1_alloc_devdata()
1241 dd->node = 0; in hfi1_alloc_devdata()
1248 spin_lock_init(&dd->sc_lock); in hfi1_alloc_devdata()
1249 spin_lock_init(&dd->sendctrl_lock); in hfi1_alloc_devdata()
1250 spin_lock_init(&dd->rcvctrl_lock); in hfi1_alloc_devdata()
1251 spin_lock_init(&dd->uctxt_lock); in hfi1_alloc_devdata()
1252 spin_lock_init(&dd->hfi1_diag_trans_lock); in hfi1_alloc_devdata()
1253 spin_lock_init(&dd->sc_init_lock); in hfi1_alloc_devdata()
1254 spin_lock_init(&dd->dc8051_memlock); in hfi1_alloc_devdata()
1255 seqlock_init(&dd->sc2vl_lock); in hfi1_alloc_devdata()
1256 spin_lock_init(&dd->sde_map_lock); in hfi1_alloc_devdata()
1257 spin_lock_init(&dd->pio_map_lock); in hfi1_alloc_devdata()
1258 mutex_init(&dd->dc8051_lock); in hfi1_alloc_devdata()
1259 init_waitqueue_head(&dd->event_queue); in hfi1_alloc_devdata()
1260 spin_lock_init(&dd->irq_src_lock); in hfi1_alloc_devdata()
1262 dd->int_counter = alloc_percpu(u64); in hfi1_alloc_devdata()
1263 if (!dd->int_counter) { in hfi1_alloc_devdata()
1264 ret = -ENOMEM; in hfi1_alloc_devdata()
1268 dd->rcv_limit = alloc_percpu(u64); in hfi1_alloc_devdata()
1269 if (!dd->rcv_limit) { in hfi1_alloc_devdata()
1270 ret = -ENOMEM; in hfi1_alloc_devdata()
1274 dd->send_schedule = alloc_percpu(u64); in hfi1_alloc_devdata()
1275 if (!dd->send_schedule) { in hfi1_alloc_devdata()
1276 ret = -ENOMEM; in hfi1_alloc_devdata()
1280 dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx); in hfi1_alloc_devdata()
1281 if (!dd->tx_opstats) { in hfi1_alloc_devdata()
1282 ret = -ENOMEM; in hfi1_alloc_devdata()
1286 dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL); in hfi1_alloc_devdata()
1287 if (!dd->comp_vect) { in hfi1_alloc_devdata()
1288 ret = -ENOMEM; in hfi1_alloc_devdata()
1293 dd->rcvhdrtail_dummy_kvaddr = in hfi1_alloc_devdata()
1294 dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64), in hfi1_alloc_devdata()
1295 &dd->rcvhdrtail_dummy_dma, GFP_KERNEL); in hfi1_alloc_devdata()
1296 if (!dd->rcvhdrtail_dummy_kvaddr) { in hfi1_alloc_devdata()
1297 ret = -ENOMEM; in hfi1_alloc_devdata()
1301 atomic_set(&dd->ipoib_rsm_usr_num, 0); in hfi1_alloc_devdata()
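The unit number above comes from an allocating XArray; a minimal sketch of that idiom (table name hypothetical):

static DEFINE_XARRAY_ALLOC(example_dev_table);

static int example_assign_unit(struct hfi1_devdata *dd)
{
	/* Stores dd at the lowest free 32-bit index, IRQ-safe locking. */
	return xa_alloc_irq(&example_dev_table, &dd->unit, dd,
			    xa_limit_32b, GFP_KERNEL);
}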
1316 if (dd->flags & HFI1_INITTED) { in hfi1_disable_after_error()
1319 dd->flags &= ~HFI1_INITTED; in hfi1_disable_after_error()
1320 if (dd->pport) in hfi1_disable_after_error()
1321 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_disable_after_error()
1324 ppd = dd->pport + pidx; in hfi1_disable_after_error()
1325 if (dd->flags & HFI1_PRESENT) in hfi1_disable_after_error()
1328 if (ppd->statusp) in hfi1_disable_after_error()
1329 *ppd->statusp &= ~HFI1_STATUS_IB_READY; in hfi1_disable_after_error()
1338 if (dd->status) in hfi1_disable_after_error()
1339 dd->status->dev |= HFI1_STATUS_HWERROR; in hfi1_disable_after_error()
1375 * Do all the generic driver unit- and chip-independent memory
1376 * allocation and initialization.
1396 /* valid CUs run from 1-128 in powers of 2 */ in hfi1_mod_init()
1399 /* valid credit return threshold is 0-100, variable is unsigned */ in hfi1_mod_init()
1412 pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n"); in hfi1_mod_init()
1420 pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n"); in hfi1_mod_init()
1425 * The dynamic algorithm expects a non-zero timeout in hfi1_mod_init()
1428 pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n"); in hfi1_mod_init()
1448 pr_err("Unable to register driver: error %d\n", -ret); in hfi1_mod_init()
1463 * Do the non-unit driver cleanup, memory free, etc. at unload.
1479 /* this can only be called after a successful initialization */
1486 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in cleanup_device_data()
1487 struct hfi1_pportdata *ppd = &dd->pport[pidx]; in cleanup_device_data()
1491 if (ppd->statusp) in cleanup_device_data()
1492 *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT; in cleanup_device_data()
1495 hrtimer_cancel(&ppd->cca_timer[i].hrtimer); in cleanup_device_data()
1497 spin_lock(&ppd->cc_state_lock); in cleanup_device_data()
1499 RCU_INIT_POINTER(ppd->cc_state, NULL); in cleanup_device_data()
1500 spin_unlock(&ppd->cc_state_lock); in cleanup_device_data()
1512 for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) { in cleanup_device_data()
1513 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt]; in cleanup_device_data()
1521 kfree(dd->rcd); in cleanup_device_data()
1522 dd->rcd = NULL; in cleanup_device_data()
1525 /* must follow rcv context free - need to remove rcv's hooks */ in cleanup_device_data()
1526 for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++) in cleanup_device_data()
1527 sc_free(dd->send_contexts[ctxt].sc); in cleanup_device_data()
1528 dd->num_send_contexts = 0; in cleanup_device_data()
1529 kfree(dd->send_contexts); in cleanup_device_data()
1530 dd->send_contexts = NULL; in cleanup_device_data()
1531 kfree(dd->hw_to_sw); in cleanup_device_data()
1532 dd->hw_to_sw = NULL; in cleanup_device_data()
1533 kfree(dd->boardname); in cleanup_device_data()
1534 vfree(dd->events); in cleanup_device_data()
1535 vfree(dd->status); in cleanup_device_data()
1540 * successful initialization.
1549 hfi1_pcie_cleanup(dd->pcidev); in postinit_cleanup()
1562 /* First, lock the non-writable module parameters */ in init_one()
1566 if (!(ent->device == PCI_DEVICE_ID_INTEL0 || in init_one()
1567 ent->device == PCI_DEVICE_ID_INTEL1)) { in init_one()
1568 dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n", in init_one()
1569 ent->device); in init_one()
1570 ret = -ENODEV; in init_one()
1591 ret = -EINVAL; in init_one()
1599 * allowed by the hardware - all powers of 2 between the min and in init_one()
1616 ret = -EINVAL; in init_one()
1628 * Do device-specific initialization, function table setup, dd in init_one()
1639 /* do the generic initialization */ in init_one()
1651 dd->flags |= HFI1_INITTED; in init_one()
1653 hfi1_dbg_ibdev_init(&dd->verbs_dev); in init_one()
1658 dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j); in init_one()
1664 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in init_one()
1665 hfi1_quiet_serdes(dd->pport + pidx); in init_one()
1666 ppd = dd->pport + pidx; in init_one()
1667 if (ppd->hfi1_wq) { in init_one()
1668 destroy_workqueue(ppd->hfi1_wq); in init_one()
1669 ppd->hfi1_wq = NULL; in init_one()
1671 if (ppd->link_wq) { in init_one()
1672 destroy_workqueue(ppd->link_wq); in init_one()
1673 ppd->link_wq = NULL; in init_one()
1702 if (refcount_dec_and_test(&dd->user_refcount)) in wait_for_clients()
1703 complete(&dd->user_comp); in wait_for_clients()
1705 wait_for_completion(&dd->user_comp); in wait_for_clients()
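wait_for_clients() pairs a refcount with a completion so unload blocks until the last user closes the device. A hedged sketch of the other half of that handshake (helper names hypothetical):

/* open(): taken once per user; init sets user_refcount to 1 ("self"). */
static void example_client_open(struct hfi1_devdata *dd)
{
	refcount_inc(&dd->user_refcount);
}

/* release(): the last put (including the "self" one) fires the completion. */
static void example_client_release(struct hfi1_devdata *dd)
{
	if (refcount_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);
}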
1713 hfi1_dbg_ibdev_exit(&dd->verbs_dev); in remove_one()
1750 * hfi1_create_rcvhdrq - create a receive header queue
1762 if (!rcd->rcvhdrq) { in hfi1_create_rcvhdrq()
1765 rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt, in hfi1_create_rcvhdrq()
1766 &rcd->rcvhdrq_dma, in hfi1_create_rcvhdrq()
1769 if (!rcd->rcvhdrq) { in hfi1_create_rcvhdrq()
1772 amt, rcd->ctxt); in hfi1_create_rcvhdrq()
1776 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || in hfi1_create_rcvhdrq()
1777 HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { in hfi1_create_rcvhdrq()
1778 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, in hfi1_create_rcvhdrq()
1780 &rcd->rcvhdrqtailaddr_dma, in hfi1_create_rcvhdrq()
1782 if (!rcd->rcvhdrtail_kvaddr) in hfi1_create_rcvhdrq()
1787 set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize, in hfi1_create_rcvhdrq()
1788 rcd->rcvhdrq_cnt); in hfi1_create_rcvhdrq()
1795 rcd->ctxt); in hfi1_create_rcvhdrq()
1796 dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, in hfi1_create_rcvhdrq()
1797 rcd->rcvhdrq_dma); in hfi1_create_rcvhdrq()
1798 rcd->rcvhdrq = NULL; in hfi1_create_rcvhdrq()
1800 return -ENOMEM; in hfi1_create_rcvhdrq()
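The header queue lives in coherent DMA memory; the idiom, reduced to a hedged sketch:

static int example_alloc_ring(struct hfi1_devdata *dd, size_t amt,
			      void **cpu_addr, dma_addr_t *dma_addr)
{
	/* Returns a CPU mapping plus the bus address programmed into HW. */
	*cpu_addr = dma_alloc_coherent(&dd->pcidev->dev, amt, dma_addr,
				       GFP_KERNEL);
	if (!*cpu_addr)
		return -ENOMEM;
	return 0;
}

/* Free must pass back the same size/CPU/bus triple. */
static void example_free_ring(struct hfi1_devdata *dd, size_t amt,
			      void *cpu_addr, dma_addr_t dma_addr)
{
	dma_free_coherent(&dd->pcidev->dev, amt, cpu_addr, dma_addr);
}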
1804 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user
1815 struct hfi1_devdata *dd = rcd->dd; in hfi1_setup_eagerbufs()
1822 * The minimum size of the eager buffers is a group of MTU-sized in hfi1_setup_eagerbufs()
1828 if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size)) in hfi1_setup_eagerbufs()
1829 rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size; in hfi1_setup_eagerbufs()
1831 * If using one-pkt-per-egr-buffer, lower the eager buffer in hfi1_setup_eagerbufs()
1832 * size to the max MTU (page-aligned). in hfi1_setup_eagerbufs()
1834 if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) in hfi1_setup_eagerbufs()
1835 rcd->egrbufs.rcvtid_size = round_mtu; in hfi1_setup_eagerbufs()
1841 if (rcd->egrbufs.size <= (1 << 20)) in hfi1_setup_eagerbufs()
1842 rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu, in hfi1_setup_eagerbufs()
1843 rounddown_pow_of_two(rcd->egrbufs.size / 8)); in hfi1_setup_eagerbufs()
1845 while (alloced_bytes < rcd->egrbufs.size && in hfi1_setup_eagerbufs()
1846 rcd->egrbufs.alloced < rcd->egrbufs.count) { in hfi1_setup_eagerbufs()
1847 rcd->egrbufs.buffers[idx].addr = in hfi1_setup_eagerbufs()
1848 dma_alloc_coherent(&dd->pcidev->dev, in hfi1_setup_eagerbufs()
1849 rcd->egrbufs.rcvtid_size, in hfi1_setup_eagerbufs()
1850 &rcd->egrbufs.buffers[idx].dma, in hfi1_setup_eagerbufs()
1852 if (rcd->egrbufs.buffers[idx].addr) { in hfi1_setup_eagerbufs()
1853 rcd->egrbufs.buffers[idx].len = in hfi1_setup_eagerbufs()
1854 rcd->egrbufs.rcvtid_size; in hfi1_setup_eagerbufs()
1855 rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr = in hfi1_setup_eagerbufs()
1856 rcd->egrbufs.buffers[idx].addr; in hfi1_setup_eagerbufs()
1857 rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma = in hfi1_setup_eagerbufs()
1858 rcd->egrbufs.buffers[idx].dma; in hfi1_setup_eagerbufs()
1859 rcd->egrbufs.alloced++; in hfi1_setup_eagerbufs()
1860 alloced_bytes += rcd->egrbufs.rcvtid_size; in hfi1_setup_eagerbufs()
1868 * - we are already using the lowest acceptable size in hfi1_setup_eagerbufs()
1869 * - we are using one-pkt-per-egr-buffer (this implies in hfi1_setup_eagerbufs()
1872 if (rcd->egrbufs.rcvtid_size == round_mtu || in hfi1_setup_eagerbufs()
1873 !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) { in hfi1_setup_eagerbufs()
1875 rcd->ctxt); in hfi1_setup_eagerbufs()
1876 ret = -ENOMEM; in hfi1_setup_eagerbufs()
1880 new_size = rcd->egrbufs.rcvtid_size / 2; in hfi1_setup_eagerbufs()
1888 rcd->egrbufs.rcvtid_size = new_size; in hfi1_setup_eagerbufs()
1893 * Re-partition already allocated buffers to a smaller in hfi1_setup_eagerbufs()
1896 rcd->egrbufs.alloced = 0; in hfi1_setup_eagerbufs()
1898 if (i >= rcd->egrbufs.count) in hfi1_setup_eagerbufs()
1900 rcd->egrbufs.rcvtids[i].dma = in hfi1_setup_eagerbufs()
1901 rcd->egrbufs.buffers[j].dma + offset; in hfi1_setup_eagerbufs()
1902 rcd->egrbufs.rcvtids[i].addr = in hfi1_setup_eagerbufs()
1903 rcd->egrbufs.buffers[j].addr + offset; in hfi1_setup_eagerbufs()
1904 rcd->egrbufs.alloced++; in hfi1_setup_eagerbufs()
1905 if ((rcd->egrbufs.buffers[j].dma + offset + in hfi1_setup_eagerbufs()
1907 (rcd->egrbufs.buffers[j].dma + in hfi1_setup_eagerbufs()
1908 rcd->egrbufs.buffers[j].len)) { in hfi1_setup_eagerbufs()
1915 rcd->egrbufs.rcvtid_size = new_size; in hfi1_setup_eagerbufs()
1918 rcd->egrbufs.numbufs = idx; in hfi1_setup_eagerbufs()
1919 rcd->egrbufs.size = alloced_bytes; in hfi1_setup_eagerbufs()
1923 rcd->ctxt, rcd->egrbufs.alloced, in hfi1_setup_eagerbufs()
1924 rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024); in hfi1_setup_eagerbufs()
1931 rcd->egrbufs.threshold = in hfi1_setup_eagerbufs()
1932 rounddown_pow_of_two(rcd->egrbufs.alloced / 2); in hfi1_setup_eagerbufs()
1938 max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size; in hfi1_setup_eagerbufs()
1939 egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size); in hfi1_setup_eagerbufs()
1940 rcd->expected_count = max_entries - egrtop; in hfi1_setup_eagerbufs()
1941 if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2) in hfi1_setup_eagerbufs()
1942 rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2; in hfi1_setup_eagerbufs()
1944 rcd->expected_base = rcd->eager_base + egrtop; in hfi1_setup_eagerbufs()
1946 rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count, in hfi1_setup_eagerbufs()
1947 rcd->eager_base, rcd->expected_base); in hfi1_setup_eagerbufs()
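A worked example of the split computed above (numbers illustrative, not from hardware):

/*
 * With rcv_array_groups = 128 and group_size = 8 the context owns
 * max_entries = 1024 RcvArray entries.  If 600 eager buffers were
 * allocated, egrtop = roundup(600, 8) = 600, so
 * expected_count = 1024 - 600 = 424 (capped at
 * MAX_TID_PAIR_ENTRIES * 2) and
 * expected_base = eager_base + 600.
 */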
1949 if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) { in hfi1_setup_eagerbufs()
1952 rcd->ctxt, rcd->egrbufs.rcvtid_size); in hfi1_setup_eagerbufs()
1953 ret = -EINVAL; in hfi1_setup_eagerbufs()
1957 for (idx = 0; idx < rcd->egrbufs.alloced; idx++) { in hfi1_setup_eagerbufs()
1958 hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER, in hfi1_setup_eagerbufs()
1959 rcd->egrbufs.rcvtids[idx].dma, order); in hfi1_setup_eagerbufs()
1966 for (idx = 0; idx < rcd->egrbufs.alloced && in hfi1_setup_eagerbufs()
1967 rcd->egrbufs.buffers[idx].addr; in hfi1_setup_eagerbufs()
1969 dma_free_coherent(&dd->pcidev->dev, in hfi1_setup_eagerbufs()
1970 rcd->egrbufs.buffers[idx].len, in hfi1_setup_eagerbufs()
1971 rcd->egrbufs.buffers[idx].addr, in hfi1_setup_eagerbufs()
1972 rcd->egrbufs.buffers[idx].dma); in hfi1_setup_eagerbufs()
1973 rcd->egrbufs.buffers[idx].addr = NULL; in hfi1_setup_eagerbufs()
1974 rcd->egrbufs.buffers[idx].dma = 0; in hfi1_setup_eagerbufs()
1975 rcd->egrbufs.buffers[idx].len = 0; in hfi1_setup_eagerbufs()