Lines Matching full:dd

21 void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)  in __cm_reset()  argument
23 write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK); in __cm_reset()
26 sendctrl = read_csr(dd, SEND_CTRL); in __cm_reset()
33 void pio_send_control(struct hfi1_devdata *dd, int op) in pio_send_control() argument
41 spin_lock_irqsave(&dd->sendctrl_lock, flags); in pio_send_control()
43 reg = read_csr(dd, SEND_CTRL); in pio_send_control()
50 for (i = 0; i < ARRAY_SIZE(dd->vld); i++) in pio_send_control()
51 if (!dd->vld[i].mtu) in pio_send_control()
68 __cm_reset(dd, reg); in pio_send_control()
76 dd_dev_err(dd, "%s: invalid control %d\n", __func__, op); in pio_send_control()
81 write_csr(dd, SEND_CTRL, reg); in pio_send_control()
83 (void)read_csr(dd, SEND_CTRL); /* flush write */ in pio_send_control()
86 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in pio_send_control()
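
The pio_send_control() matches above (lines 41-86) show the driver's usual SEND_CTRL update pattern: serialize on dd->sendctrl_lock, read-modify-write the CSR, then read it back so the posted write is flushed before the lock drops. A minimal hedged sketch of that pattern; the helper name sendctrl_set_bits() is hypothetical and not part of pio.c:

	static void sendctrl_set_bits(struct hfi1_devdata *dd, u64 bits)
	{
		unsigned long flags;
		u64 reg;

		/* all SEND_CTRL updates are serialized by sendctrl_lock */
		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		reg = read_csr(dd, SEND_CTRL);
		write_csr(dd, SEND_CTRL, reg | bits);
		(void)read_csr(dd, SEND_CTRL);	/* flush the posted write */
		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	}
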
181 int init_sc_pools_and_sizes(struct hfi1_devdata *dd) in init_sc_pools_and_sizes() argument
184 int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1; in init_sc_pools_and_sizes()
233 dd, in init_sc_pools_and_sizes()
246 dd, in init_sc_pools_and_sizes()
254 dd, in init_sc_pools_and_sizes()
263 dd, in init_sc_pools_and_sizes()
289 count = dd->n_krcv_queues; in init_sc_pools_and_sizes()
293 count = dd->num_rcv_contexts - dd->n_krcv_queues; in init_sc_pools_and_sizes()
296 dd, in init_sc_pools_and_sizes()
301 if (total_contexts + count > chip_send_contexts(dd)) in init_sc_pools_and_sizes()
302 count = chip_send_contexts(dd) - total_contexts; in init_sc_pools_and_sizes()
319 dd, in init_sc_pools_and_sizes()
325 dd->sc_sizes[i].count = count; in init_sc_pools_and_sizes()
326 dd->sc_sizes[i].size = size; in init_sc_pools_and_sizes()
330 dd, in init_sc_pools_and_sizes()
340 dd, in init_sc_pools_and_sizes()
357 dd, in init_sc_pools_and_sizes()
366 dd, in init_sc_pools_and_sizes()
378 if (dd->sc_sizes[i].size < 0) { in init_sc_pools_and_sizes()
379 unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size); in init_sc_pools_and_sizes()
382 dd->sc_sizes[i].size = mem_pool_info[pool].size; in init_sc_pools_and_sizes()
386 if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS) in init_sc_pools_and_sizes()
387 dd->sc_sizes[i].size = PIO_MAX_BLOCKS; in init_sc_pools_and_sizes()
390 used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count; in init_sc_pools_and_sizes()
394 dd_dev_info(dd, "unused send context blocks: %d\n", extra); in init_sc_pools_and_sizes()
399 int init_send_contexts(struct hfi1_devdata *dd) in init_send_contexts() argument
404 ret = init_credit_return(dd); in init_send_contexts()
408 dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8), in init_send_contexts()
410 dd->send_contexts = kcalloc(dd->num_send_contexts, in init_send_contexts()
413 if (!dd->send_contexts || !dd->hw_to_sw) { in init_send_contexts()
414 kfree(dd->hw_to_sw); in init_send_contexts()
415 kfree(dd->send_contexts); in init_send_contexts()
416 free_credit_return(dd); in init_send_contexts()
422 dd->hw_to_sw[i] = INVALID_SCI; in init_send_contexts()
431 struct sc_config_sizes *scs = &dd->sc_sizes[i]; in init_send_contexts()
435 &dd->send_contexts[context]; in init_send_contexts()
451 * Must be called with dd->sc_lock held.
453 static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index, in sc_hw_alloc() argument
460 for (index = 0, sci = &dd->send_contexts[0]; in sc_hw_alloc()
461 index < dd->num_send_contexts; index++, sci++) { in sc_hw_alloc()
465 context = chip_send_contexts(dd) - index - 1; in sc_hw_alloc()
466 dd->hw_to_sw[context] = index; in sc_hw_alloc()
472 dd_dev_err(dd, "Unable to locate a free type %d send context\n", type); in sc_hw_alloc()
479 * Must be called with dd->sc_lock held.
481 static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context) in sc_hw_free() argument
485 sci = &dd->send_contexts[sw_index]; in sc_hw_free()
487 dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n", in sc_hw_free()
491 dd->hw_to_sw[hw_context] = INVALID_SCI; in sc_hw_free()
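
The comments at lines 451 and 479 require that sc_hw_alloc() and sc_hw_free() run with dd->sc_lock held; the sc_alloc() matches at lines 685-688 follow the same rule. A hedged illustration of a caller honoring that contract; the wrapper below is hypothetical, while SC_KERNEL is one of the driver's real context types:

	static int example_grab_hw_context(struct hfi1_devdata *dd,
					   u32 *sw_index, u32 *hw_context)
	{
		unsigned long flags;
		int ret;

		/* hw_to_sw[] and send_contexts[] are only consistent under sc_lock */
		spin_lock_irqsave(&dd->sc_lock, flags);
		ret = sc_hw_alloc(dd, SC_KERNEL, sw_index, hw_context);
		spin_unlock_irqrestore(&dd->sc_lock, flags);
		return ret;
	}
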
524 sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index]; in cr_group_addresses()
526 &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc]; in cr_group_addresses()
603 write_kctxt_csr(sc->dd, sc->hw_context, in sc_set_cr_threshold()
623 struct hfi1_devdata *dd = sc->dd; in set_pio_integrity() local
627 write_kctxt_csr(dd, hw_context, in set_pio_integrity()
629 hfi1_pkt_default_send_ctxt_mask(dd, type)); in set_pio_integrity()
654 struct send_context *sc_alloc(struct hfi1_devdata *dd, int type, in sc_alloc() argument
669 if (dd->flags & HFI1_FROZEN) in sc_alloc()
679 dd_dev_err(dd, in sc_alloc()
685 spin_lock_irqsave(&dd->sc_lock, flags); in sc_alloc()
686 ret = sc_hw_alloc(dd, type, &sw_index, &hw_context); in sc_alloc()
688 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_alloc()
694 sci = &dd->send_contexts[sw_index]; in sc_alloc()
697 sc->dd = dd; in sc_alloc()
720 sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK) in sc_alloc()
728 write_kctxt_csr(dd, hw_context, SC(CTRL), reg); in sc_alloc()
733 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1); in sc_alloc()
736 write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), in sc_alloc()
751 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), in sc_alloc()
757 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg); in sc_alloc()
789 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg); in sc_alloc()
794 write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg); in sc_alloc()
797 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_alloc()
838 struct hfi1_devdata *dd; in sc_free() local
847 dd = sc->dd; in sc_free()
849 dd_dev_err(dd, "piowait list not empty!\n"); in sc_free()
855 spin_lock_irqsave(&dd->sc_lock, flags); in sc_free()
856 dd->send_contexts[sw_index].sc = NULL; in sc_free()
859 write_kctxt_csr(dd, hw_context, SC(CTRL), 0); in sc_free()
860 write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0); in sc_free()
861 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0); in sc_free()
862 write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0); in sc_free()
863 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0); in sc_free()
864 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0); in sc_free()
865 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0); in sc_free()
868 sc_hw_free(dd, sw_index, hw_context); in sc_free()
869 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_free()
888 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); in sc_disable()
892 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); in sc_disable()
949 static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context) in is_sc_halted() argument
951 return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) & in is_sc_halted()
971 struct hfi1_devdata *dd = sc->dd; in sc_wait_for_packet_egress() local
978 reg = read_csr(dd, sc->hw_context * 8 + in sc_wait_for_packet_egress()
982 is_sc_halted(dd, sc->hw_context) || egress_halted(reg)) in sc_wait_for_packet_egress()
992 dd_dev_err(dd, in sc_wait_for_packet_egress()
996 queue_work(dd->pport->link_wq, in sc_wait_for_packet_egress()
997 &dd->pport->link_bounce_work); in sc_wait_for_packet_egress()
1006 pause_for_credit_return(dd); in sc_wait_for_packet_egress()
1009 void sc_wait(struct hfi1_devdata *dd) in sc_wait() argument
1013 for (i = 0; i < dd->num_send_contexts; i++) { in sc_wait()
1014 struct send_context *sc = dd->send_contexts[i].sc; in sc_wait()
1033 struct hfi1_devdata *dd = sc->dd; in sc_restart() local
1042 dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index, in sc_restart()
1053 reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS)); in sc_restart()
1057 dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n", in sc_restart()
1083 dd_dev_err(dd, in sc_restart()
1118 void pio_freeze(struct hfi1_devdata *dd) in pio_freeze() argument
1123 for (i = 0; i < dd->num_send_contexts; i++) { in pio_freeze()
1124 sc = dd->send_contexts[i].sc; in pio_freeze()
1145 void pio_kernel_unfreeze(struct hfi1_devdata *dd) in pio_kernel_unfreeze() argument
1150 for (i = 0; i < dd->num_send_contexts; i++) { in pio_kernel_unfreeze()
1151 sc = dd->send_contexts[i].sc; in pio_kernel_unfreeze()
 1163  * @dd: valid device data
1173 void pio_kernel_linkup(struct hfi1_devdata *dd) in pio_kernel_linkup() argument
1178 for (i = 0; i < dd->num_send_contexts; i++) { in pio_kernel_linkup()
1179 sc = dd->send_contexts[i].sc; in pio_kernel_linkup()
1193 static int pio_init_wait_progress(struct hfi1_devdata *dd) in pio_init_wait_progress() argument
1199 max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5; in pio_init_wait_progress()
1201 reg = read_csr(dd, SEND_PIO_INIT_CTXT); in pio_init_wait_progress()
1217 void pio_reset_all(struct hfi1_devdata *dd) in pio_reset_all() argument
1222 ret = pio_init_wait_progress(dd); in pio_reset_all()
1226 write_csr(dd, SEND_PIO_ERR_CLEAR, in pio_reset_all()
1231 write_csr(dd, SEND_PIO_INIT_CTXT, in pio_reset_all()
1234 ret = pio_init_wait_progress(dd); in pio_reset_all()
1236 dd_dev_err(dd, in pio_reset_all()
1246 struct hfi1_devdata *dd; in sc_enable() local
1252 dd = sc->dd; in sc_enable()
1262 sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1285 reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS)); in sc_enable()
1287 write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg); in sc_enable()
1293 spin_lock(&dd->sc_init_lock); in sc_enable()
1304 write_csr(dd, SEND_PIO_INIT_CTXT, pio); in sc_enable()
1310 ret = pio_init_wait_progress(dd); in sc_enable()
1311 spin_unlock(&dd->sc_init_lock); in sc_enable()
1313 dd_dev_err(dd, in sc_enable()
1323 write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl); in sc_enable()
1328 read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1344 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), in sc_return_credits()
1350 read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE)); in sc_return_credits()
1352 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0); in sc_return_credits()
1508 write_kctxt_csr(sc->dd, sc->hw_context, in sc_add_credit_return_intr()
1530 write_kctxt_csr(sc->dd, sc->hw_context, in sc_del_credit_return_intr()
1561 struct hfi1_devdata *dd = sc->dd; in sc_piobufavail() local
1569 if (dd->send_contexts[sc->sw_index].type != SC_KERNEL && in sc_piobufavail()
1570 dd->send_contexts[sc->sw_index].type != SC_VL15) in sc_piobufavail()
1708 void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context) in sc_group_release_update() argument
1714 spin_lock(&dd->sc_lock); in sc_group_release_update()
1715 sw_index = dd->hw_to_sw[hw_context]; in sc_group_release_update()
1716 if (unlikely(sw_index >= dd->num_send_contexts)) { in sc_group_release_update()
1717 dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n", in sc_group_release_update()
1721 sc = dd->send_contexts[sw_index].sc; in sc_group_release_update()
1728 sw_index = dd->hw_to_sw[gc]; in sc_group_release_update()
1729 if (unlikely(sw_index >= dd->num_send_contexts)) { in sc_group_release_update()
1730 dd_dev_err(dd, in sc_group_release_update()
1735 sc_release_update(dd->send_contexts[sw_index].sc); in sc_group_release_update()
1738 spin_unlock(&dd->sc_lock); in sc_group_release_update()
1743 * @dd: devdata
1750 struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd, in pio_select_send_context_vl() argument
1768 m = rcu_dereference(dd->pio_map); in pio_select_send_context_vl()
1771 return dd->vld[0].sc; in pio_select_send_context_vl()
1778 rval = !rval ? dd->vld[0].sc : rval; in pio_select_send_context_vl()
1784 * @dd: devdata
1790 struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd, in pio_select_send_context_sc() argument
1793 u8 vl = sc_to_vlt(dd, sc5); in pio_select_send_context_sc()
1795 return pio_select_send_context_vl(dd, selector, vl); in pio_select_send_context_sc()
1823 static void set_threshold(struct hfi1_devdata *dd, int scontext, int i) in set_threshold() argument
1827 thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext], in set_threshold()
1829 sc_mtu_to_threshold(dd->kernel_send_context[scontext], in set_threshold()
1830 dd->vld[i].mtu, in set_threshold()
1831 dd->rcd[0]->rcvhdrqentsize)); in set_threshold()
1832 sc_set_cr_threshold(dd->kernel_send_context[scontext], thres); in set_threshold()
1837 * @dd: hfi1_devdata
1863 int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts) in pio_map_init() argument
1873 for (i = 0; i < dd->num_send_contexts; i++) in pio_map_init()
1874 if (dd->send_contexts[i].type == SC_KERNEL) in pio_map_init()
1912 if (dd->kernel_send_context[scontext]) { in pio_map_init()
1914 dd->kernel_send_context[scontext]; in pio_map_init()
1915 set_threshold(dd, scontext, i); in pio_map_init()
1929 spin_lock_irq(&dd->pio_map_lock); in pio_map_init()
1930 oldmap = rcu_dereference_protected(dd->pio_map, in pio_map_init()
1931 lockdep_is_held(&dd->pio_map_lock)); in pio_map_init()
1934 rcu_assign_pointer(dd->pio_map, newmap); in pio_map_init()
1936 spin_unlock_irq(&dd->pio_map_lock); in pio_map_init()
1947 void free_pio_map(struct hfi1_devdata *dd) in free_pio_map() argument
1950 if (rcu_access_pointer(dd->pio_map)) { in free_pio_map()
1951 spin_lock_irq(&dd->pio_map_lock); in free_pio_map()
1952 pio_map_free(rcu_access_pointer(dd->pio_map)); in free_pio_map()
1953 RCU_INIT_POINTER(dd->pio_map, NULL); in free_pio_map()
1954 spin_unlock_irq(&dd->pio_map_lock); in free_pio_map()
1957 kfree(dd->kernel_send_context); in free_pio_map()
1958 dd->kernel_send_context = NULL; in free_pio_map()
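
pio_map_init() (lines 1929-1936) and free_pio_map() (lines 1950-1954) manage dd->pio_map as an RCU-protected pointer: writers swap it under pio_map_lock, while readers such as pio_select_send_context_vl() (line 1768) only rcu_dereference() it. A simplified, hedged sketch of the writer side; the struct name pio_vl_map and the use of synchronize_rcu() in place of a call_rcu() callback are assumptions made for brevity:

	/* newmap was built and populated beforehand */
	struct pio_vl_map *oldmap;

	spin_lock_irq(&dd->pio_map_lock);
	oldmap = rcu_dereference_protected(dd->pio_map,
					   lockdep_is_held(&dd->pio_map_lock));
	rcu_assign_pointer(dd->pio_map, newmap);	/* publish the new map */
	spin_unlock_irq(&dd->pio_map_lock);

	if (oldmap) {
		synchronize_rcu();	/* wait out in-flight readers */
		pio_map_free(oldmap);
	}
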
1961 int init_pervl_scs(struct hfi1_devdata *dd) in init_pervl_scs() argument
1967 struct hfi1_pportdata *ppd = dd->pport; in init_pervl_scs()
1969 dd->vld[15].sc = sc_alloc(dd, SC_VL15, in init_pervl_scs()
1970 dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
1971 if (!dd->vld[15].sc) in init_pervl_scs()
1974 hfi1_init_ctxt(dd->vld[15].sc); in init_pervl_scs()
1975 dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048); in init_pervl_scs()
1977 dd->kernel_send_context = kcalloc_node(dd->num_send_contexts, in init_pervl_scs()
1979 GFP_KERNEL, dd->node); in init_pervl_scs()
1980 if (!dd->kernel_send_context) in init_pervl_scs()
1983 dd->kernel_send_context[0] = dd->vld[15].sc; in init_pervl_scs()
1993 dd->vld[i].sc = sc_alloc(dd, SC_KERNEL, in init_pervl_scs()
1994 dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
1995 if (!dd->vld[i].sc) in init_pervl_scs()
1997 dd->kernel_send_context[i + 1] = dd->vld[i].sc; in init_pervl_scs()
1998 hfi1_init_ctxt(dd->vld[i].sc); in init_pervl_scs()
2000 dd->vld[i].mtu = hfi1_max_mtu; in init_pervl_scs()
2003 dd->kernel_send_context[i + 1] = in init_pervl_scs()
2004 sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
2005 if (!dd->kernel_send_context[i + 1]) in init_pervl_scs()
2007 hfi1_init_ctxt(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2010 sc_enable(dd->vld[15].sc); in init_pervl_scs()
2011 ctxt = dd->vld[15].sc->hw_context; in init_pervl_scs()
2013 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs()
2014 dd_dev_info(dd, in init_pervl_scs()
2016 dd->vld[15].sc->sw_index, ctxt); in init_pervl_scs()
2019 sc_enable(dd->vld[i].sc); in init_pervl_scs()
2020 ctxt = dd->vld[i].sc->hw_context; in init_pervl_scs()
2022 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs()
2025 sc_enable(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2026 ctxt = dd->kernel_send_context[i + 1]->hw_context; in init_pervl_scs()
2028 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs()
2031 if (pio_map_init(dd, ppd->port - 1, num_vls, NULL)) in init_pervl_scs()
2037 sc_free(dd->vld[i].sc); in init_pervl_scs()
2038 dd->vld[i].sc = NULL; in init_pervl_scs()
2042 sc_free(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2044 kfree(dd->kernel_send_context); in init_pervl_scs()
2045 dd->kernel_send_context = NULL; in init_pervl_scs()
2048 sc_free(dd->vld[15].sc); in init_pervl_scs()
2052 int init_credit_return(struct hfi1_devdata *dd) in init_credit_return() argument
2057 dd->cr_base = kcalloc( in init_credit_return()
2061 if (!dd->cr_base) { in init_credit_return()
2068 set_dev_node(&dd->pcidev->dev, i); in init_credit_return()
2069 dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev, in init_credit_return()
2071 &dd->cr_base[i].dma, in init_credit_return()
2073 if (!dd->cr_base[i].va) { in init_credit_return()
2074 set_dev_node(&dd->pcidev->dev, dd->node); in init_credit_return()
2075 dd_dev_err(dd, in init_credit_return()
2082 set_dev_node(&dd->pcidev->dev, dd->node); in init_credit_return()
2089 free_credit_return(dd); in init_credit_return()
2093 void free_credit_return(struct hfi1_devdata *dd) in free_credit_return() argument
2097 if (!dd->cr_base) in free_credit_return()
2100 if (dd->cr_base[i].va) { in free_credit_return()
2101 dma_free_coherent(&dd->pcidev->dev, in free_credit_return()
2104 dd->cr_base[i].va, in free_credit_return()
2105 dd->cr_base[i].dma); in free_credit_return()
2108 kfree(dd->cr_base); in free_credit_return()
2109 dd->cr_base = NULL; in free_credit_return()
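
init_credit_return() (lines 2057-2082) allocates one credit-return buffer per NUMA node, temporarily retargeting the PCI device's node so dma_alloc_coherent() lands near that node, then restores dd->node; free_credit_return() releases the buffers. A hedged fragment of that allocation pattern; the num_online_nodes() bound and the per-node size are assumptions, not copied from pio.c:

	int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);	/* assumed size */
	int i;

	for (i = 0; i < num_online_nodes(); i++) {
		set_dev_node(&dd->pcidev->dev, i);	/* steer allocation toward node i */
		dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev, bytes,
						       &dd->cr_base[i].dma,
						       GFP_KERNEL);
		if (!dd->cr_base[i].va)
			break;	/* real code logs via dd_dev_err() and unwinds */
	}
	set_dev_node(&dd->pcidev->dev, dd->node);	/* always restore the default node */
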
2128 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS)); in seqfile_dump_sci()