Lines Matching +full:port +full:-mapping (matched lines from drivers/infiniband/ulp/ipoib/ipoib_ib.c, the IPoIB IB transport layer; each entry is prefixed with its line number in that file)

38 #include <linux/dma-mapping.h>
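/* ipoib_create_ah(): allocate an ipoib_ah wrapper for a newly created address handle */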
64 return ERR_PTR(-ENOMEM);
66 ah->dev = dev;
67 ah->last_send = 0;
68 kref_init(&ah->ref);
75 ah->ah = vah;
76 ipoib_dbg(ipoib_priv(dev), "Created ah %p\n", ah->ah);
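/* ipoib_free_ah(): move the AH onto priv->dead_ahs for later destruction */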
85 struct ipoib_dev_priv *priv = ipoib_priv(ah->dev);
89 spin_lock_irqsave(&priv->lock, flags);
90 list_add_tail(&ah->list, &priv->dead_ahs);
91 spin_unlock_irqrestore(&priv->lock, flags);
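/* ipoib_ud_dma_unmap_rx(): unmap a UD receive buffer */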
95 u64 mapping[IPOIB_UD_RX_SG])
97 ib_dma_unmap_single(priv->ca, mapping[0],
98 IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
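/* ipoib_ib_post_receive(): (re)post the receive WR for ring slot 'id' */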
107 priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
108 priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
109 priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
112 ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL);
115 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
116 dev_kfree_skb_any(priv->rx_ring[id].skb);
117 priv->rx_ring[id].skb = NULL;
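/* ipoib_alloc_rx_skb(): allocate and DMA-map an skb for an RX ring slot */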
128 u64 *mapping;
130 buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
142 mapping = priv->rx_ring[id].mapping;
143 mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
145 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
148 priv->rx_ring[id].skb = skb;
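/* ipoib_ib_post_receives(): fill the whole receive ring at startup */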
163 return -ENOMEM;
167 return -EIO;
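/* ipoib_ib_handle_rx_wc(): receive completion path (unmap, repost, classify, hand off to GRO) */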
177 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
179 u64 mapping[IPOIB_UD_RX_SG];
184 wr_id, wc->status);
192 skb = priv->rx_ring[wr_id].skb;
194 if (unlikely(wc->status != IB_WC_SUCCESS)) {
195 if (wc->status != IB_WC_WR_FLUSH_ERR)
198 wc->status, wr_id, wc->vendor_err);
199 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
201 priv->rx_ring[wr_id].skb = NULL;
205 memcpy(mapping, priv->rx_ring[wr_id].mapping,
206 IPOIB_UD_RX_SG * sizeof(*mapping));
213 ++dev->stats.rx_dropped;
218 wc->byte_len, wc->slid);
220 ipoib_ud_dma_unmap_rx(priv, mapping);
222 skb_put(skb, wc->byte_len);
225 dgid = &((struct ib_grh *)skb->data)->dgid;
227 if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
228 skb->pkt_type = PACKET_HOST;
229 else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
230 skb->pkt_type = PACKET_BROADCAST;
232 skb->pkt_type = PACKET_MULTICAST;
234 sgid = &((struct ib_grh *)skb->data)->sgid;
240 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
243 if ((wc->wc_flags & IB_WC_GRH) &&
244 sgid->global.interface_id != priv->local_gid.global.interface_id)
255 skb->protocol = ((struct ipoib_header *) skb->data)->proto;
258 ++dev->stats.rx_packets;
259 dev->stats.rx_bytes += skb->len;
260 if (skb->pkt_type == PACKET_MULTICAST)
261 dev->stats.multicast++;
263 skb->dev = dev;
264 if ((dev->features & NETIF_F_RXCSUM) &&
265 likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
266 skb->ip_summed = CHECKSUM_UNNECESSARY;
268 napi_gro_receive(&priv->recv_napi, skb);
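/* ipoib_dma_map_tx(): map the skb head and page fragments, unwinding on mapping errors */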
278 struct sk_buff *skb = tx_req->skb;
279 u64 *mapping = tx_req->mapping;
284 mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
286 if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
287 return -EIO;
293 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
294 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
295 mapping[i + off] = ib_dma_map_page(ca,
300 if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
306 for (; i > 0; --i) {
307 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
309 ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
313 ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
315 return -EIO;
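/* ipoib_dma_unmap_tx(): undo the TX mappings after send completion */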
321 struct sk_buff *skb = tx_req->skb;
322 u64 *mapping = tx_req->mapping;
327 ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
333 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
334 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
336 ib_dma_unmap_page(priv->ca, mapping[i + off],
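/* ipoib_qp_state_validate_work(): query the UD QP and recover it from SQE back to RTS */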
351 struct ipoib_dev_priv *priv = qp_work->priv;
356 ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
363 __func__, priv->qp->qp_num, qp_attr.qp_state);
365 /* currently supported only for the SQE->RTS transition */
369 ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
371 pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
372 ret, priv->qp->qp_num);
376 __func__, priv->qp->qp_num);
379 priv->qp->qp_num, qp_attr.qp_state);
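/* ipoib_ib_handle_tx_wc(): send completion handling, ring accounting and error recovery */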
389 unsigned int wr_id = wc->wr_id;
393 wr_id, wc->status);
401 tx_req = &priv->tx_ring[wr_id];
405 ++dev->stats.tx_packets;
406 dev->stats.tx_bytes += tx_req->skb->len;
408 dev_kfree_skb_any(tx_req->skb);
410 ++priv->tx_tail;
411 ++priv->global_tx_tail;
414 ((priv->global_tx_head - priv->global_tx_tail) <=
416 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
419 if (wc->status != IB_WC_SUCCESS &&
420 wc->status != IB_WC_WR_FLUSH_ERR) {
424 wc->status, wr_id, wc->vendor_err);
429 INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
430 qp_work->priv = priv;
431 queue_work(priv->wq, &qp_work->work);
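/* poll_tx(): drain the send CQ and dispatch CM vs. UD completions */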
440 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
442 wc = priv->send_wc + i;
443 if (wc->wr_id & IPOIB_OP_CM)
444 ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i);
446 ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
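/* ipoib_rx_poll(): receive-side NAPI poll loop */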
455 struct net_device *dev = priv->dev;
464 int max = (budget - done);
467 n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
470 struct ib_wc *wc = priv->ibwc + i;
472 if (wc->wr_id & IPOIB_OP_RECV) {
474 if (wc->wr_id & IPOIB_OP_CM)
489 if (unlikely(ib_req_notify_cq(priv->recv_cq,
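/* ipoib_tx_poll(): send-side NAPI poll loop */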
503 struct net_device *dev = priv->dev;
508 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
511 wc = priv->send_wc + i;
512 if (wc->wr_id & IPOIB_OP_CM)
520 if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
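/* receive CQ event handler (ipoib_ib_rx_completion()): schedule the receive NAPI */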
532 napi_schedule(&priv->recv_napi);
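/* send CQ event handling: (re)schedule the send NAPI, deferring to a work item if that fails */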
543 ret = napi_schedule(&priv->send_napi);
546 } while (!ret && netif_queue_stopped(priv->dev) &&
547 test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags));
555 ret = napi_schedule(&priv->send_napi);
560 * its re-arm of the napi.
562 if (!ret && netif_queue_stopped(priv->dev))
563 schedule_work(&priv->reschedule_napi_work);
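/* post_send(): build the UD work request (LSO or plain send) and post it */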
572 struct sk_buff *skb = tx_req->skb;
576 priv->tx_wr.wr.wr_id = wr_id;
577 priv->tx_wr.remote_qpn = dqpn;
578 priv->tx_wr.ah = address;
581 priv->tx_wr.mss = skb_shinfo(skb)->gso_size;
582 priv->tx_wr.header = head;
583 priv->tx_wr.hlen = hlen;
584 priv->tx_wr.wr.opcode = IB_WR_LSO;
586 priv->tx_wr.wr.opcode = IB_WR_SEND;
588 return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL);
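/* ipoib_send(): validate and linearize the skb, map it, post it, and stop the queue when the ring fills */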
598 unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);
602 phead = skb->data;
605 ++dev->stats.tx_dropped;
606 ++dev->stats.tx_errors;
608 return -1;
611 if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
613 skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
614 ++dev->stats.tx_dropped;
615 ++dev->stats.tx_errors;
616 ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
617 return -1;
622 if (skb_shinfo(skb)->nr_frags > usable_sge) {
625 ++dev->stats.tx_dropped;
626 ++dev->stats.tx_errors;
628 return -1;
631 if (skb_shinfo(skb)->nr_frags > usable_sge) {
633 ++dev->stats.tx_dropped;
634 ++dev->stats.tx_errors;
636 return -1;
642 skb->len, address, dqpn);
651 tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
652 tx_req->skb = skb;
653 if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
654 ++dev->stats.tx_errors;
656 return -1;
659 if (skb->ip_summed == CHECKSUM_PARTIAL)
660 priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
662 priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
664 if ((priv->global_tx_head - priv->global_tx_tail) ==
665 ipoib_sendq_size - 1) {
674 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
678 rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
682 ++dev->stats.tx_errors;
691 rc = priv->tx_head;
692 ++priv->tx_head;
693 ++priv->global_tx_head;
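/* dead-AH reaping: destroy address handles whose last posted send has completed */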
703 netif_tx_lock_bh(priv->dev);
704 spin_lock_irqsave(&priv->lock, flags);
706 list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
707 if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
708 list_del(&ah->list);
709 rdma_destroy_ah(ah->ah, 0);
713 spin_unlock_irqrestore(&priv->lock, flags);
714 netif_tx_unlock_bh(priv->dev);
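/* AH reaper task: periodic re-queueing, with start/stop helpers toggling IPOIB_STOP_REAPER */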
724 if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
725 queue_delayed_work(priv->wq, &priv->ah_reap_task,
731 clear_bit(IPOIB_STOP_REAPER, &priv->flags);
732 queue_delayed_work(priv->wq, &priv->ah_reap_task,
738 set_bit(IPOIB_STOP_REAPER, &priv->flags);
739 cancel_delayed_work(&priv->ah_reap_task);
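/* recvs_pending() and check_qp_movement_and_print(): helpers used while stopping the datagram QP */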
754 if (priv->rx_ring[i].skb)
773 /* print according to the new state and the previous state. */
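/* ipoib_napi_enable() / ipoib_napi_disable(): toggle the receive and send NAPI instances */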
786 napi_enable_locked(&priv->recv_napi);
787 napi_enable_locked(&priv->send_napi);
796 napi_disable_locked(&priv->recv_napi);
797 napi_disable_locked(&priv->send_napi);
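/* ipoib_ib_dev_stop_default(): move the QP to ERR, drain pending work, free the rings, then reset the QP */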
809 if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
819 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
820 check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
825 while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
829 priv->tx_head - priv->tx_tail,
836 while ((int)priv->tx_tail - (int)priv->tx_head < 0) {
837 tx_req = &priv->tx_ring[priv->tx_tail &
838 (ipoib_sendq_size - 1)];
840 dev_kfree_skb_any(tx_req->skb);
841 ++priv->tx_tail;
842 ++priv->global_tx_tail;
848 rx_req = &priv->rx_ring[i];
849 if (!rx_req->skb)
852 priv->rx_ring[i].mapping);
853 dev_kfree_skb_any(rx_req->skb);
854 rx_req->skb = NULL;
869 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
872 ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
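/* ipoib_ib_dev_open_default() / ipoib_ib_dev_open(): bring up the QP and rings, verify the P_Key, open the device */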
885 return -1;
900 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
905 return -1;
914 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
915 ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
916 (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
917 return -1;
921 if (priv->rn_ops->ndo_open(dev)) {
922 pr_warn("%s: Failed to open dev\n", dev->name);
926 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
932 return -1;
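/* ipoib_ib_dev_stop(): stop via rn_ops and clear IPOIB_FLAG_INITIALIZED */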
939 priv->rn_ops->ndo_stop(dev);
941 clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
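/* ipoib_pkey_dev_check_presence(): look up priv->pkey in the port P_Key table and set IPOIB_PKEY_ASSIGNED */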
950 if (!(priv->pkey & 0x7fff) ||
951 ib_find_pkey(priv->ca, priv->port, priv->pkey,
952 &priv->pkey_index)) {
953 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
955 if (rn->set_id)
956 rn->set_id(dev, priv->pkey_index);
957 set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
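/* ipoib_ib_dev_up() / ipoib_ib_dev_down(): toggle IPOIB_FLAG_OPER_UP */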
967 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
972 set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
983 clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
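/* ipoib_drain_cq(): poll off remaining completions (marked as flushed) while the interface goes down */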
999 * called from the BH-disabled NAPI poll context, so disable
1005 n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
1012 if (priv->ibwc[i].status == IB_WC_SUCCESS)
1013 priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;
1015 if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
1016 if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
1017 ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
1019 ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
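/* update_parent_pkey(): re-read P_Key index 0, update priv->pkey and the broadcast address */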
1033 * Takes whatever value is in pkey index 0 and updates priv->pkey
1041 prev_pkey = priv->pkey;
1042 result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
1044 ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
1045 priv->port, result);
1049 priv->pkey |= 0x8000;
1051 if (prev_pkey != priv->pkey) {
1053 prev_pkey, priv->pkey);
1058 priv->dev->broadcast[8] = priv->pkey >> 8;
1059 priv->dev->broadcast[9] = priv->pkey & 0xff;
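/* update_child_pkey(): re-check P_Key presence and whether the cached P_Key index moved */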
1070 u16 old_index = priv->pkey_index;
1072 priv->pkey_index = 0;
1073 ipoib_pkey_dev_check_presence(priv->dev);
1075 if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
1076 (old_index == priv->pkey_index))
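/* ipoib_dev_addr_changed_valid(): detect subnet prefix / port GUID changes and refresh the device address */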
1091 u32 port;
1094 if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
1097 netif_addr_lock_bh(priv->dev);
1102 priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
1103 dev_addr_mod(priv->dev, 4, (u8 *)&gid0.global.subnet_prefix,
1107 search_gid.global.interface_id = priv->local_gid.global.interface_id;
1109 netif_addr_unlock_bh(priv->dev);
1111 err = ib_find_gid(priv->ca, &search_gid, &port, &index);
1113 netif_addr_lock_bh(priv->dev);
1116 priv->local_gid.global.interface_id)
1123 * Per IB spec the port GUID can't change if the HCA is powered on.
1124 * port GUID is the basis for GID at index 0 which is the basis for
1135 * they change the port GUID when the HCA is powered, so in order
1143 * the port GUID has changed and GID at index 0 has changed
1144 * so we need to change priv->local_gid and priv->dev->dev_addr
1147 if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
1148 if (!err && port == priv->port) {
1149 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
1152 &priv->flags);
1154 set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
1160 if (!err && port == priv->port) {
1163 if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
1164 memcpy(&priv->local_gid, &gid0,
1165 sizeof(priv->local_gid));
1166 dev_addr_mod(priv->dev, 4, (u8 *)&gid0,
1167 sizeof(priv->local_gid));
1174 netif_addr_unlock_bh(priv->dev);
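/* __ipoib_ib_dev_flush(): light/normal/heavy flush handling (P_Key changes, address changes, up/down) */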
1182 struct net_device *dev = priv->dev;
1185 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
1190 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
1194 if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
1197 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
1203 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
1208 /* child devices chase their origin pkey value, while non-child
1211 if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
1215 ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
1223 ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
1237 oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
1240 set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
1249 if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
1266 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
1270 ipoib_mcast_restart_task(&priv->restart_task);
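/* flush dispatch: recurse into child interfaces, then queue the requested flush work */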
1303 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
1306 netdev_lock(priv->dev);
1307 list_for_each_entry(cpriv, &priv->child_intfs, list)
1309 netdev_unlock(priv->dev);
1314 queue_work(ipoib_workqueue, &priv->flush_light);
1317 queue_work(ipoib_workqueue, &priv->flush_normal);
1320 queue_work(ipoib_workqueue, &priv->flush_heavy);
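/* ipoib_ib_dev_cleanup(): final teardown via rn_ops->ndo_uninit and PD deallocation */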
1347 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
1349 priv->rn_ops->ndo_uninit(dev);
1351 if (priv->pd) {
1352 ib_dealloc_pd(priv->pd);
1353 priv->pd = NULL;