Lines matching defs:priv — definitions and uses of the local struct ipoib_dev_priv *priv pointer (apparently the kernel IPoIB datapath, drivers/infiniband/ulp/ipoib/ipoib_ib.c)
85 struct ipoib_dev_priv *priv = ipoib_priv(ah->dev);
89 spin_lock_irqsave(&priv->lock, flags);
90 list_add_tail(&ah->list, &priv->dead_ahs);
91 spin_unlock_irqrestore(&priv->lock, flags);
94 static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
97 ib_dma_unmap_single(priv->ca, mapping[0],
98 IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
104 struct ipoib_dev_priv *priv = ipoib_priv(dev);
107 priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
108 priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
109 priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
112 ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL);
114 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
115 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
116 dev_kfree_skb_any(priv->rx_ring[id].skb);
117 priv->rx_ring[id].skb = NULL;
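
A minimal sketch of how the receive-posting fragments above (file lines 104-117) fit together. It is reassembled from the excerpts, not copied from the source; the rx_wr/rx_sge/rx_ring fields and the IPOIB_OP_RECV tag are taken from the listing, while the function shape and return handling are assumptions.

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int ret;

        /* Tag the work request so the CQ handler can tell RX from TX. */
        priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
        priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
        priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

        ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL);
        if (unlikely(ret)) {
                /* Undo the DMA mapping and drop the skb so the ring slot
                 * never keeps a half-initialized buffer. */
                ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
                ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
                dev_kfree_skb_any(priv->rx_ring[id].skb);
                priv->rx_ring[id].skb = NULL;
        }

        return ret;
}
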
125 struct ipoib_dev_priv *priv = ipoib_priv(dev);
130 buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
142 mapping = priv->rx_ring[id].mapping;
143 mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
145 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
148 priv->rx_ring[id].skb = skb;
157 struct ipoib_dev_priv *priv = ipoib_priv(dev);
162 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
166 ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
176 struct ipoib_dev_priv *priv = ipoib_priv(dev);
183 ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
187 ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
192 skb = priv->rx_ring[wr_id].skb;
196 ipoib_warn(priv,
199 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
201 priv->rx_ring[wr_id].skb = NULL;
205 memcpy(mapping, priv->rx_ring[wr_id].mapping,
217 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
220 ipoib_ud_dma_unmap_rx(priv, mapping);
240 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
244 sgid->global.interface_id != priv->local_gid.global.interface_id)
268 napi_gro_receive(&priv->recv_napi, skb);
272 ipoib_warn(priv, "ipoib_ib_post_receive failed "
318 void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
327 ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
336 ib_dma_unmap_page(priv->ca, mapping[i + off],
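
File lines 318-336 belong to the TX unmap helper. A hedged reconstruction, assuming one DMA mapping per fragment plus an optional mapping for the linear head; the 'off' bookkeeping for a zero-length head is inferred from the i + off index in the excerpt.

void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
                        struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i, off;

        if (skb_headlen(skb)) {
                /* Linear head was mapped with ib_dma_map_single(). */
                ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
                                    DMA_TO_DEVICE);
                off = 1;
        } else {
                off = 0;
        }

        /* Page fragments follow the head in the mapping array. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                ib_dma_unmap_page(priv->ca, mapping[i + off],
                                  skb_frag_size(frag), DMA_TO_DEVICE);
        }
}
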
351 struct ipoib_dev_priv *priv = qp_work->priv;
356 ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
358 ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
363 __func__, priv->qp->qp_num, qp_attr.qp_state);
369 ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
372 ret, priv->qp->qp_num);
376 __func__, priv->qp->qp_num);
379 priv->qp->qp_num, qp_attr.qp_state);
388 struct ipoib_dev_priv *priv = ipoib_priv(dev);
392 ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
396 ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
401 tx_req = &priv->tx_ring[wr_id];
403 ipoib_dma_unmap_tx(priv, tx_req);
410 ++priv->tx_tail;
411 ++priv->global_tx_tail;
414 ((priv->global_tx_head - priv->global_tx_tail) <=
416 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
422 ipoib_warn(priv,
430 qp_work->priv = priv;
431 queue_work(priv->wq, &qp_work->work);
435 static int poll_tx(struct ipoib_dev_priv *priv)
440 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
442 wc = priv->send_wc + i;
444 ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i);
446 ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
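
File lines 435-446 are the send-CQ drain loop used by the transmit path. A sketch assembled from those fragments; the IPOIB_OP_CM bit in wr_id selecting the connected-mode handler comes from the listing, while the "a full batch means more may be pending" return value is an assumption.

static int poll_tx(struct ipoib_dev_priv *priv)
{
        struct ib_wc *wc;
        int n, i;

        n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
        for (i = 0; i < n; ++i) {
                wc = priv->send_wc + i;
                if (wc->wr_id & IPOIB_OP_CM)
                        ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i);
                else
                        ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
        }

        /* A full batch suggests more completions are still queued. */
        return n == MAX_SEND_CQE;
}
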
453 struct ipoib_dev_priv *priv =
455 struct net_device *dev = priv->dev;
467 n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
470 struct ib_wc *wc = priv->ibwc + i;
489 if (unlikely(ib_req_notify_cq(priv->recv_cq,
501 struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv,
503 struct net_device *dev = priv->dev;
508 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
511 wc = priv->send_wc + i;
520 if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
530 struct ipoib_dev_priv *priv = ctx_ptr;
532 napi_schedule(&priv->recv_napi);
538 struct ipoib_dev_priv *priv =
543 ret = napi_schedule(&priv->send_napi);
546 } while (!ret && netif_queue_stopped(priv->dev) &&
547 test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags));
552 struct ipoib_dev_priv *priv = ctx_ptr;
555 ret = napi_schedule(&priv->send_napi);
562 if (!ret && netif_queue_stopped(priv->dev))
563 schedule_work(&priv->reschedule_napi_work);
566 static inline int post_send(struct ipoib_dev_priv *priv,
574 ipoib_build_sge(priv, tx_req);
576 priv->tx_wr.wr.wr_id = wr_id;
577 priv->tx_wr.remote_qpn = dqpn;
578 priv->tx_wr.ah = address;
581 priv->tx_wr.mss = skb_shinfo(skb)->gso_size;
582 priv->tx_wr.header = head;
583 priv->tx_wr.hlen = hlen;
584 priv->tx_wr.wr.opcode = IB_WR_LSO;
586 priv->tx_wr.wr.opcode = IB_WR_SEND;
588 return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL);
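
File lines 566-588 show the UD post_send helper. A reassembled sketch: the ipoib_build_sge() call and the cached priv->tx_wr fields are taken directly from the listing, and the head/hlen test deciding between IB_WR_LSO and IB_WR_SEND is inferred from the two opcode assignments.

static inline int post_send(struct ipoib_dev_priv *priv,
                            unsigned int wr_id,
                            struct ib_ah *address, u32 dqpn,
                            struct ipoib_tx_buf *tx_req,
                            void *head, int hlen)
{
        struct sk_buff *skb = tx_req->skb;

        ipoib_build_sge(priv, tx_req);

        priv->tx_wr.wr.wr_id   = wr_id;
        priv->tx_wr.remote_qpn = dqpn;
        priv->tx_wr.ah         = address;

        if (head) {
                /* Segmentation offload: hand the header and MSS to the HCA. */
                priv->tx_wr.mss       = skb_shinfo(skb)->gso_size;
                priv->tx_wr.header    = head;
                priv->tx_wr.hlen      = hlen;
                priv->tx_wr.wr.opcode = IB_WR_LSO;
        } else {
                priv->tx_wr.wr.opcode = IB_WR_SEND;
        }

        return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL);
}
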
594 struct ipoib_dev_priv *priv = ipoib_priv(dev);
598 unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);
604 ipoib_warn(priv, "linear data too small\n");
611 if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
612 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
613 skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
616 ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
624 ipoib_warn(priv, "skb could not be linearized\n");
632 ipoib_warn(priv, "too many frags after skb linearize\n");
640 ipoib_dbg_data(priv,
651 tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
653 if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
660 priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
662 priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
664 if ((priv->global_tx_head - priv->global_tx_tail) ==
666 ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
674 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
676 ipoib_warn(priv, "request notify on send CQ failed\n");
678 rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
681 ipoib_warn(priv, "post_send failed, error %d\n", rc);
683 ipoib_dma_unmap_tx(priv, tx_req);
691 rc = priv->tx_head;
692 ++priv->tx_head;
693 ++priv->global_tx_head;
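
The tx_head/tx_tail arithmetic visible in file lines 651-693 relies on the send queue size being a power of two: a slot is picked with head & (ipoib_sendq_size - 1) and occupancy is head - tail, which stays correct even when the unsigned counters wrap. A standalone demo of that invariant; every name below is invented for illustration, none of it is driver code.

#include <assert.h>
#include <stdio.h>

#define SENDQ_SIZE 128u         /* must be a power of two, like ipoib_sendq_size */

int main(void)
{
        unsigned int head = 0xfffffff0u;        /* start near wraparound on purpose */
        unsigned int tail = head;

        for (int i = 0; i < 200; i++) {
                unsigned int slot = head & (SENDQ_SIZE - 1);    /* ring index */

                (void)slot;                     /* a real driver would fill this slot */
                ++head;                         /* "post" a send */
                if (head - tail == SENDQ_SIZE)
                        ++tail;                 /* "complete" the oldest send */
                assert(head - tail <= SENDQ_SIZE);      /* occupancy never exceeds the ring */
        }
        printf("occupancy after wraparound: %u\n", head - tail);
        return 0;
}

The same head - tail expression is what the global_tx_head - global_tx_tail tests at file lines 414 and 664 compare against the queue size to decide when to stop and when to wake the kernel net queue.
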
698 static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv)
703 netif_tx_lock_bh(priv->dev);
704 spin_lock_irqsave(&priv->lock, flags);
706 list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
707 if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
713 spin_unlock_irqrestore(&priv->lock, flags);
714 netif_tx_unlock_bh(priv->dev);
719 struct ipoib_dev_priv *priv =
722 ipoib_reap_dead_ahs(priv);
724 if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
725 queue_delayed_work(priv->wq, &priv->ah_reap_task,
729 static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv)
731 clear_bit(IPOIB_STOP_REAPER, &priv->flags);
732 queue_delayed_work(priv->wq, &priv->ah_reap_task,
736 static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv)
738 set_bit(IPOIB_STOP_REAPER, &priv->flags);
739 cancel_delayed_work(&priv->ah_reap_task);
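
File lines 698-739 implement the dead address-handle reaper: ipoib_reap_dead_ahs() frees AHs whose last_send has already completed (tx_tail has caught up), and a delayed work item re-arms itself until IPOIB_STOP_REAPER is set. A sketch of the self-rearming worker; the one-second interval is an assumption, not taken from the file.

static void __ipoib_reap_ah(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, ah_reap_task.work);

        ipoib_reap_dead_ahs(priv);

        /* Keep reaping periodically until the stop flag is raised
         * (see ipoib_stop_ah_reaper() in the listing above). */
        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
                queue_delayed_work(priv->wq, &priv->ah_reap_task,
                                   round_jiffies_relative(HZ));
}
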
749 struct ipoib_dev_priv *priv = ipoib_priv(dev);
754 if (priv->rx_ring[i].skb)
760 static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
770 ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
775 ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
777 ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
783 struct ipoib_dev_priv *priv = ipoib_priv(dev);
786 napi_enable_locked(&priv->recv_napi);
787 napi_enable_locked(&priv->send_napi);
793 struct ipoib_dev_priv *priv = ipoib_priv(dev);
796 napi_disable_locked(&priv->recv_napi);
797 napi_disable_locked(&priv->send_napi);
803 struct ipoib_dev_priv *priv = ipoib_priv(dev);
809 if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
819 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
820 check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
825 while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
827 ipoib_warn(priv,
829 priv->tx_head - priv->tx_tail,
836 while ((int)priv->tx_tail - (int)priv->tx_head < 0) {
837 tx_req = &priv->tx_ring[priv->tx_tail &
839 ipoib_dma_unmap_tx(priv, tx_req);
841 ++priv->tx_tail;
842 ++priv->global_tx_tail;
848 rx_req = &priv->rx_ring[i];
851 ipoib_ud_dma_unmap_rx(priv,
852 priv->rx_ring[i].mapping);
865 ipoib_dbg(priv, "All sends and receives done.\n");
869 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
870 ipoib_warn(priv, "Failed to modify QP to RESET state\n");
872 ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
879 struct ipoib_dev_priv *priv = ipoib_priv(dev);
884 ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
890 ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
896 ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
900 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
910 struct ipoib_dev_priv *priv = ipoib_priv(dev);
914 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
915 ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
916 (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
920 ipoib_start_ah_reaper(priv);
921 if (priv->rn_ops->ndo_open(dev)) {
926 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
931 ipoib_stop_ah_reaper(priv);
937 struct ipoib_dev_priv *priv = ipoib_priv(dev);
939 priv->rn_ops->ndo_stop(dev);
941 clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
942 ipoib_stop_ah_reaper(priv);
947 struct ipoib_dev_priv *priv = ipoib_priv(dev);
950 if (!(priv->pkey & 0x7fff) ||
951 ib_find_pkey(priv->ca, priv->port, priv->pkey,
952 &priv->pkey_index)) {
953 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
956 rn->set_id(dev, priv->pkey_index);
957 set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
963 struct ipoib_dev_priv *priv = ipoib_priv(dev);
967 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
968 ipoib_dbg(priv, "PKEY is not assigned.\n");
972 set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
979 struct ipoib_dev_priv *priv = ipoib_priv(dev);
981 ipoib_dbg(priv, "downing ib_dev\n");
983 clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
994 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1005 n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
1012 if (priv->ibwc[i].status == IB_WC_SUCCESS)
1013 priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;
1015 if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
1016 if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
1017 ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
1019 ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
1026 while (poll_tx(priv))
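
File lines 994-1026 drain the receive CQ during shutdown. A sketch of the loop implied by those fragments: successful completions are rewritten to IB_WC_WR_FLUSH_ERR so the normal handlers only release resources instead of delivering packets, and the send CQ is drained afterwards via poll_tx(). Locking and BH/NAPI context handling are omitted here.

static void ipoib_drain_cq(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int i, n;

        do {
                n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i) {
                        /* Downgrade successes so handlers just free buffers. */
                        if (priv->ibwc[i].status == IB_WC_SUCCESS)
                                priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

                        if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
                                if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
                                else
                                        ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
                        }
                }
        } while (n == IPOIB_NUM_WC);

        while (poll_tx(priv))
                ; /* drain the send CQ as well */
}
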
1033 * Takes whatever value which is in pkey index 0 and updates priv->pkey
1036 static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
1041 prev_pkey = priv->pkey;
1042 result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
1044 ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
1045 priv->port, result);
1049 priv->pkey |= 0x8000;
1051 if (prev_pkey != priv->pkey) {
1052 ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
1053 prev_pkey, priv->pkey);
1058 priv->dev->broadcast[8] = priv->pkey >> 8;
1059 priv->dev->broadcast[9] = priv->pkey & 0xff;
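
File lines 1041-1059 read the P_Key at index 0, force the full-membership bit and splice the value into bytes 8 and 9 of the broadcast hardware address. A standalone illustration of just that bit and byte manipulation; the P_Key value and the 20-byte address are made up for the demo.

#include <stdio.h>

int main(void)
{
        unsigned short pkey = 0x1234 & 0x7fff;  /* pretend value from ib_query_pkey() */
        unsigned char broadcast[20] = { 0 };    /* IPoIB hardware addresses are 20 bytes */

        pkey |= 0x8000;                         /* always advertise full membership */
        broadcast[8] = pkey >> 8;               /* the P_Key sits in bytes 8 and 9 */
        broadcast[9] = pkey & 0xff;             /* of the broadcast address */

        printf("pkey=0x%04x broadcast[8..9]=%02x:%02x\n",
               pkey, broadcast[8], broadcast[9]);
        return 0;
}
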
1068 static inline int update_child_pkey(struct ipoib_dev_priv *priv)
1070 u16 old_index = priv->pkey_index;
1072 priv->pkey_index = 0;
1073 ipoib_pkey_dev_check_presence(priv->dev);
1075 if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
1076 (old_index == priv->pkey_index))
1085 static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
1094 if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
1097 netif_addr_lock_bh(priv->dev);
1102 priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
1103 dev_addr_mod(priv->dev, 4, (u8 *)&gid0.global.subnet_prefix,
1107 search_gid.global.interface_id = priv->local_gid.global.interface_id;
1109 netif_addr_unlock_bh(priv->dev);
1111 err = ib_find_gid(priv->ca, &search_gid, &port, &index);
1113 netif_addr_lock_bh(priv->dev);
1116 priv->local_gid.global.interface_id)
1144 * so we need to change priv->local_gid and priv->dev->dev_addr
1147 if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
1148 if (!err && port == priv->port) {
1149 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
1152 &priv->flags);
1154 set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
1160 if (!err && port == priv->port) {
1163 if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
1164 memcpy(&priv->local_gid, &gid0,
1165 sizeof(priv->local_gid));
1166 dev_addr_mod(priv->dev, 4, (u8 *)&gid0,
1167 sizeof(priv->local_gid));
1174 netif_addr_unlock_bh(priv->dev);
1179 static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
1182 struct net_device *dev = priv->dev;
1185 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
1189 ipoib_dev_addr_changed_valid(priv);
1190 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
1194 if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
1197 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
1198 update_parent_pkey(priv);
1200 update_child_pkey(priv);
1202 ipoib_dev_addr_changed_valid(priv);
1203 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
1211 if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
1212 result = update_child_pkey(priv);
1215 ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
1220 result = update_parent_pkey(priv);
1223 ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
1237 oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
1240 set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
1241 ipoib_reap_dead_ahs(priv);
1249 if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
1266 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
1269 if (ipoib_dev_addr_changed_valid(priv))
1270 ipoib_mcast_restart_task(&priv->restart_task);
1276 struct ipoib_dev_priv *priv =
1279 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
1284 struct ipoib_dev_priv *priv =
1287 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
1292 struct ipoib_dev_priv *priv =
1296 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
1300 void ipoib_queue_work(struct ipoib_dev_priv *priv,
1303 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
1306 netdev_lock(priv->dev);
1307 list_for_each_entry(cpriv, &priv->child_intfs, list)
1309 netdev_unlock(priv->dev);
1314 queue_work(ipoib_workqueue, &priv->flush_light);
1317 queue_work(ipoib_workqueue, &priv->flush_normal);
1320 queue_work(ipoib_workqueue, &priv->flush_heavy);
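
File lines 1300-1320 dispatch a flush request. A hedged sketch: the parent/child flag test, the netdev locking around child_intfs and the three queue_work() targets come from the listing, while the recursive per-child call and the enum spelling are assumptions.

void ipoib_queue_work(struct ipoib_dev_priv *priv,
                      enum ipoib_flush_level level)
{
        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* A parent interface fans the request out to its children. */
                netdev_lock(priv->dev);
                list_for_each_entry(cpriv, &priv->child_intfs, list)
                        ipoib_queue_work(cpriv, level);         /* assumed recursion */
                netdev_unlock(priv->dev);
        }

        switch (level) {
        case IPOIB_FLUSH_LIGHT:
                queue_work(ipoib_workqueue, &priv->flush_light);
                break;
        case IPOIB_FLUSH_NORMAL:
                queue_work(ipoib_workqueue, &priv->flush_normal);
                break;
        case IPOIB_FLUSH_HEAVY:
                queue_work(ipoib_workqueue, &priv->flush_heavy);
                break;
        }
}
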
1327 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1329 ipoib_dbg(priv, "cleaning up ib_dev\n");
1332 * that may wish to touch priv fields that are no longer valid
1345 ipoib_reap_dead_ahs(priv);
1347 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
1349 priv->rn_ops->ndo_uninit(dev);
1351 if (priv->pd) {
1352 ib_dealloc_pd(priv->pd);
1353 priv->pd = NULL;