// SPDX-License-Identifier: GPL-2.0-or-later
/* ...  Jan-Bernd Themann <themann@de.ibm.com>  ... */
static int msg_level = -1;
static int use_mcs = 1;

MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue  "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
		 "Default = 1");
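/*
 * All ring sizes above are 2^x - 1 (x in [7..14]); ehea_init_port_res()
 * below allocates one extra slot per ring, so each skb array length is an
 * exact power of two and "index & (len - 1)" wraps the ring cheaply.
 * Hypothetical usage sketch (parameter names from the descriptions above):
 *
 *     modprobe ehea use_mcs=1 rq1_entries=16383 sq_entries=16383
 */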
/* of_device_id match-table entry */
	.compatible = "IBM,lhea-ethernet",

static void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}
/* ehea_update_firmware_handles(): count resources on active ports */
		struct ehea_port *port = adapter->port[k];

		if (!port || (port->state != EHEA_PORT_UP))
			continue;
		...
		num_portres += port->num_def_qps;

/* ehea_update_firmware_handles(): collect the firmware handles */
		struct ehea_port *port = adapter->port[k];

		if (!port || (port->state != EHEA_PORT_UP) ||
		    ...)
			continue;

		for (l = 0; l < port->num_def_qps; l++) {
			struct ehea_port_res *pr = &port->port_res[l];

			arr[i].adh = adapter->handle;
			arr[i++].fwh = pr->qp->fw_handle;
			arr[i].adh = adapter->handle;
			arr[i++].fwh = pr->send_cq->fw_handle;
			arr[i].adh = adapter->handle;
			arr[i++].fwh = pr->recv_cq->fw_handle;
			arr[i].adh = adapter->handle;
			arr[i++].fwh = pr->eq->fw_handle;
			arr[i].adh = adapter->handle;
			arr[i++].fwh = pr->send_mr.handle;
			arr[i].adh = adapter->handle;
			arr[i++].fwh = pr->recv_mr.handle;
		}
		arr[i].adh = adapter->handle;
		arr[i++].fwh = port->qp_eq->fw_handle;
		num_ports--;

	arr[i].adh = adapter->handle;
	arr[i++].fwh = adapter->neq->fw_handle;

	if (adapter->mr.handle) {
		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->mr.handle;
	}
	num_adapters--;
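/*
 * Six firmware handles are saved per port resource (QP, send/recv CQ, EQ,
 * send/recv MR) plus one qp_eq handle per port; per adapter the NEQ and,
 * if present, the kernel MR are added.  num_ports/num_adapters are
 * decremented as each object's handles are recorded into arr[].
 */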
/* ehea_update_bcmc_registrations(): count needed registrations */
		struct ehea_port *port = adapter->port[k];

		if (!port || (port->state != EHEA_PORT_UP))
			continue;
		...
		list_for_each_entry(mc_entry, &port->mc_list->list, list)
			...

/* ehea_update_bcmc_registrations(): record the registrations */
		struct ehea_port *port = adapter->port[k];

		if (!port || (port->state != EHEA_PORT_UP))
			continue;

		arr[i].adh = adapter->handle;
		arr[i].port_id = port->logical_port_id;
		...
		arr[i++].macaddr = port->mac_addr;

		arr[i].adh = adapter->handle;
		arr[i].port_id = port->logical_port_id;
		...
		arr[i++].macaddr = port->mac_addr;
		num_registrations -= 2;

		list_for_each_entry(mc_entry,
				    &port->mc_list->list, list) {
			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			...
			if (mc_entry->macaddr == 0)
				...
			arr[i++].macaddr = mc_entry->macaddr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			...
			if (mc_entry->macaddr == 0)
				...
			arr[i++].macaddr = mc_entry->macaddr;
			num_registrations -= 2;
		}
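/*
 * Each address is recorded twice and num_registrations dropped by two; the
 * elided assignments between the pairs presumably set the two registration
 * scopes (untagged vs. VLAN) for the same MAC.  A zero mc_entry->macaddr
 * apparently marks the "all multicast" registration (cf. ehea_allmulti()
 * further down, which registers/deregisters MAC 0).
 */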
/* ehea_get_stats64() */
	struct ehea_port *port = netdev_priv(dev);
	...
	for (i = 0; i < port->num_def_qps; i++) {
		rx_packets += port->port_res[i].rx_packets;
		rx_bytes += port->port_res[i].rx_bytes;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		tx_packets += port->port_res[i].tx_packets;
		tx_bytes += port->port_res[i].tx_bytes;
	}

	stats->tx_packets = tx_packets;
	stats->rx_bytes = rx_bytes;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;

	stats->multicast = port->stats.multicast;
	stats->rx_errors = port->stats.rx_errors;

/* ehea_update_stats(): periodic worker feeding port->stats */
	struct ehea_port *port =
		container_of(work, struct ehea_port, stats_work.work);
	struct net_device *dev = port->netdev;
	struct rtnl_link_stats64 *stats = &port->stats;
	...
	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		...
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	...
	schedule_delayed_work(&port->stats_work, ...);
/* ehea_refill_rq1() */
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	...
	pr->rq1_skba.os_skbs = 0;
	...
		pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
	...
			pr->rq1_skba.os_skbs = fill_wqes - i;
			...
		index--;
		index &= max_index_mask;
	...
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
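/*
 * rq1_skba.len is a power of two, so max_index_mask = len - 1 wraps the
 * ring with a single AND instead of a modulo: e.g. len = 16384 gives
 * mask 0x3fff and (16383 + 1) & 0x3fff == 0.  os_skbs carries WQEs that
 * could not be filled (e.g. on allocation failure) over into the next
 * refill attempt.
 */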
/* ehea_init_fill_rq1() */
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	...
	if (nr_rq1a > pr->rq1_skba.len) {
		...
		return;
	}
	...
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i - 1);

/* ehea_refill_rq_def() */
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	...
	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;
	...
		q_skba->os_skbs = fill_wqes;
		...
	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		...
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				netdev_info(pr->port->netdev,
					    "rq%i ran dry - no mem for skb\n",
					    rq_nr);
				ret = -ENOMEM;
			}
			break;
		}
		...
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			...
			q_skba->os_skbs = fill_wqes - i;
			break;
		}
		...
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;
		...
	}
	...
	q_skba->index = index;
	...
	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);

/* ehea_refill_rq2() / ehea_refill_rq3() */
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, ...);
	...
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, ...);
/* ehea_check_cqe() */
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;

/* ehea_fill_skb() */
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */
	...
	skb->protocol = eth_type_trans(skb, dev);

	/* The packet was not an IPV4 packet so a complemented checksum was
	   calculated. The value is found in the Internet Checksum field. */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);

/* get_skb_by_index() */
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	...
	x = skb_index + 1;
	x &= (arr_len - 1);
	...
	pref = (skb_array[x]->data);

/* get_skb_by_index_ll() */
	x = wqe_index + 1;
	x &= (arr_len - 1);
	...
	pref = (skb_array[x]->data);
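/*
 * Before handing the current skb up, both lookup helpers compute the
 * index of the *next* ring slot (wrapping with arr_len - 1) and read its
 * ->data pointer; the elided lines presumably prefetch() that buffer so
 * the next completion's cache misses start early.
 */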
/* ehea_treat_poll_error() */
	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		...
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		...
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			pr_err("Critical receive error for QP %d. Resetting port.\n",
			       pr->qp->init_attr.qp_nr);
			...
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}
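/*
 * Non-fatal checksum/CRC errors are only counted per queue; a fatal CQE
 * (EHEA_CQE_STAT_FAT_ERR_MASK) schedules a full port reset and returns 1
 * so the caller drops the descriptor.  The skb for the failed RQ2/RQ3
 * descriptor is looked up (and, in the elided lines, freed) so the ring
 * slot can be refilled.
 */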
/* ehea_proc_rwqes() */
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	...
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	...
			if (netif_msg_rx_status(port))
				...
			if (rq == 1) {
				/* low-latency RQ1 packet */
				...
					netif_info(port, rx_err, dev, ...);
				...
				..., cqe->num_bytes_transfered - 4);
			} else if (rq == 2) {
				...
					netif_err(port, rx_err, dev, ...);
				...
			} else {	/* rq == 3 */
				...
					netif_err(port, rx_err, dev, ...);
				...
			}

			processed_bytes += skb->len;

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
						       cqe->vlan_tag);

			napi_gro_receive(&pr->napi, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			...
		}
	...
	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];

		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}
static void check_sqs(struct ehea_port *port)
{
	...
	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		...
		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		...
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 ...);
		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}
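/*
 * check_sqs() probes each send queue after a memory re-registration: it
 * posts a marker swqe (wr_id SWQE_RESTART_CHECK, flagged PURGE +
 * SIGNALLED_COMPLETION + immediate data) and then sleeps on
 * port->restart_wq.  The completion path in ehea_proc_cqes() recognizes
 * the marker via its wr_id; a timeout means HW and SW queues are out of
 * sync, and the port is reset.
 */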
/* ehea_proc_cqes() */
	struct ehea_cq *send_cq = pr->send_cq;
	...
	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
						       pr - &pr->port->port_res[0]);
	...
		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			...
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {
			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			...
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;
	...
	atomic_add(swqe_av, &pr->swqe_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	wake_up(&pr->port->swqe_avail_wq);
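/*
 * TX completion accounting: each CQE carries, in its wr_id, how many
 * swqes it stands for (EHEA_WR_ID_REFILL); the sum is returned to
 * swqe_avail in one atomic_add.  The stopped queue is only woken after
 * re-checking the condition under the tx lock, the usual pattern to
 * avoid racing with ehea_start_xmit() stopping the queue concurrently.
 */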
/* ehea_poll() */
	struct net_device *dev = pr->port->netdev;
	...
	rx += ehea_proc_rwqes(dev, pr, budget - rx);
	...
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		...
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);
		...
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
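/*
 * NAPI re-arm race check: when the budget is not exhausted the driver
 * re-enables CQ event generation (ehea_reset_cq_ep()/_n1()) and then
 * polls both CQs once more; work that slipped in between would otherwise
 * be stranded until the next interrupt.  Only if both polls come back
 * empty can polling stay off (the elided lines presumably complete or
 * reschedule NAPI accordingly).
 */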
/* ehea_recv_irq_handler() */
	napi_schedule(&pr->napi);

/* ehea_qp_aff_irq_handler() */
	struct ehea_port *port = param;
	...
	eqe = ehea_poll_eq(port->qp_eq);
	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
		       eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			...
				reset_port = 1;
		} else
			reset_port = 1;	/* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
	}

/* ehea_get_port() */
	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
int ehea_sense_port_attr(struct ehea_port *port)
{
	...
		ret = -ENOMEM;
	...
	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      ..., cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}
	...
	if (ret || netif_msg_probe(port))
		...
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	...
		ret = -ENOMEM;
	...
	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			pr_err("Failed sensing port speed\n");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");
			ret = -EPERM;
		} else {
			ret = -EIO;
			pr_err("Failed setting port speed\n");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);
	...
/* ehea_parse_eqe() */
	struct ehea_port *port;
	...
	port = ehea_get_port(adapter, portnum);
	if (!port) {
		...
		return;
	}
	dev = port->netdev;

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			...
			ret = ehea_sense_port_attr(port);
			if (ret)
				netdev_err(dev, "failed resensing port attributes\n");
			...
			netif_info(port, link, dev,
				   "Logical port up: %dMbps %s Duplex\n",
				   port->port_speed,
				   port->full_duplex == 1 ? "Full" : "Half");
			...
		} else if (netif_carrier_ok(dev)) {
			netif_info(port, link, dev,
				   "Logical port down\n");
			...
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			netif_info(port, link, dev,
				   "Physical port up\n");
			...
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			netif_info(port, link, dev,
				   "Physical port down\n");
			...
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			netdev_info(dev,
				    "External switch port is primary port\n");
		else
			netdev_info(dev,
				    "External switch port is backup port\n");
		break;
	...
	case EHEA_EC_PORT_MALFUNC:
		netdev_info(dev, "Port malfunction\n");
		...
		break;
	...
	}

/* ehea_neq_tasklet() */
	eqe = ehea_poll_eq(adapter->neq);
	while (eqe) {
		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);

/* ehea_interrupt_neq() */
	tasklet_hi_schedule(&adapter->neq_tasklet);
/* ehea_fill_port_res() */
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
	...
	ehea_init_fill_rq1(pr, pr->rq1_skba.len);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
	...

/* ehea_reg_interrupts() */
	struct ehea_port *port = netdev_priv(dev);
	...
	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  0, port->int_aff_name, port);
	if (ret) {
		netdev_err(dev, ..., port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	netif_info(port, ifup, dev, ..., port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  0, pr->int_send_name, pr);
		if (ret) {
			netdev_err(dev, ..., i, pr->eq->attr.ist1);
			goto out_free_req;
		}
		netif_info(port, ifup, dev, ..., pr->eq->attr.ist1, i);
	}
	...
out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;
	...

/* ehea_free_interrupts() */
	struct ehea_port *port = netdev_priv(dev);
	...
	/* free send event queue interrupts */
	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		netif_info(port, intr, dev, ..., i, pr->eq->attr.ist1);
	}

	/* associated event interrupt */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	netif_info(port, intr, dev, ..., port->qp_eq->attr.ist1);
static int ehea_configure_port(struct ehea_port *port)
{
	...
		ret = -ENOMEM;
	...
	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | ...
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;
	...
}

/* ehea_gen_smrs() */
	struct ehea_adapter *adapter = pr->port->adapter;
	...
	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	...
	return -EIO;

/* ehea_rem_smrs() */
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	return 0;

/* ehea_init_q_skba() */
	q_skba->arr = vzalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct ehea_port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	...
	int ret = -EIO;
	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

	/* preserve the counters across the memset() of *pr */
	tx_bytes = pr->tx_bytes;
	tx_packets = pr->tx_packets;
	rx_bytes = pr->rx_bytes;
	rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	pr->port = port;

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info(...,
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
	...
	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll);
	...
out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
	...
	return ret;
}
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);
	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		...
	}
	return ret;
}
/* write_swqe2_immediate() */
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	...
	swqe->descriptors = 0;

	if (skb_is_gso(skb)) {
		swqe->tx_control |= EHEA_SWQE_TSO;
		swqe->mss = skb_shinfo(skb)->gso_size;
		...
	}
	...
		swqe->immediate_data_length = immediate_len;

		if (skb_data_size > immediate_len) {
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - immediate_len;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + immediate_len);
			swqe->descriptors++;
		}
	...
		swqe->immediate_data_length = skb_data_size;

/* write_swqe2_data() */
	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	...
	if (nfrags > 0) {
		/* skb fragments */
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			sg1entry->l_key = lkey;
			sg1entry->len = skb_frag_size(frag);
			sg1entry->vaddr =
				ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = skb_frag_size(frag);
			sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
		}
	}
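/*
 * SWQE2 scatter-gather layout: the linear part of the skb is copied into
 * the immediate-data area (the elided lines apparently cap immediate_len
 * at the protocol headers for TSO), anything beyond that plus page
 * fragments go into sg entries.  sg1entry is a dedicated first
 * descriptor; when it already carries linear data, the frag loop offsets
 * into sg_list by sg1entry_contains_frag_data.
 */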
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	...
	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		...
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		...
		ret = -EIO;
	}
	...
}

/* ehea_set_mac_addr() */
	struct ehea_port *port = netdev_priv(dev);
	...
	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}
	...
		ret = -ENOMEM;
	...
	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	eth_hw_addr_set(dev, mac_addr->sa_data);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}
	...

/* ehea_promiscuous_error() */
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
/* ehea_promiscuous() */
	struct ehea_port *port = netdev_priv(dev);
	...
	if (enable == port->promisc)
		return;
	...
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, ..., cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
	...

/* ehea_multicast_reg_helper() */
	...
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;
	...
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	...

/* ehea_drop_multicast_list() */
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	...
	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			...
			ret = -EIO;
		}
		...
	}

/* ehea_allmulti() */
	struct ehea_port *port = netdev_priv(dev);
	...
	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			...
		}
	} else if (!enable) {
		/* Disable ALLMULTI */
		hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
		if (!hret)
			port->allmulti = 0;
		...
	}

/* ehea_add_multicast_entry() */
	...
	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		...
	}
/* ehea_set_multicast_list() */
	struct ehea_port *port = netdev_priv(dev);
	...
	ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	...
			/* Fall back when out of registration slots: */
			ehea_allmulti(dev, 1);
			...
		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			...
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);
	...

/* xmit_common() */
	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
	...
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;

	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_UDP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct udphdr, check);
		break;
	case IPPROTO_TCP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct tcphdr, check);
		break;
	}
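/*
 * Checksum offload is described to the HW by byte offsets into the
 * frame: ip_start/ip_end bracket the IPv4 header, and tcp_offset points
 * at the L4 checksum field itself (ip_end + 1 + offsetof(..., check)),
 * for both TCP and UDP.  Only CHECKSUM_PARTIAL skbs request offload.
 */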
/* ehea_xmit2() */
	...
	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;

/* ehea_xmit3() */
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	...
	if (!skb->data_len)
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	else
		skb_copy_bits(skb, 0, imm_data, skb->len);

	swqe->immediate_data_length = skb->len;

/* ehea_start_xmit() */
	struct ehea_port *port = netdev_priv(dev);
	...
	pr = &port->port_res[skb_get_queue_mapping(skb)];
	...
	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	...
	atomic_dec(&pr->swqe_avail);

	if (skb_vlan_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = skb_vlan_tag_get(skb);
	}

	pr->tx_packets++;
	pr->tx_bytes += skb->len;

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;

		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			    | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
			| EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
			| EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	netif_info(port, tx_queued, dev,
		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
	if (netif_msg_tx_queued(port))
		...
	...
		swqe->tx_control |= EHEA_SWQE_PURGE;
	...
	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		pr->p_stats.queue_stopped++;
		...
	}
	...
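/*
 * Two transmit paths: short frames (<= SWQE3_MAX_IMM bytes) are copied
 * whole into the swqe's immediate area (SWQE3), so no skb reference is
 * kept and a signalled completion is requested only every sig_comp_iv
 * packets to amortize CQE processing.  Longer frames use SWQE2
 * descriptors; the skb is stashed in sq_skba at the index encoded into
 * wr_id, always with a signalled completion, so ehea_proc_cqes() can
 * free it later.
 */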
/* ehea_vlan_rx_add_vid() */
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	...
		err = -ENOMEM;
	...
	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, ..., cb1);
	if (hret != H_SUCCESS) {
		err = -EINVAL;
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, ..., cb1);
	if (hret != H_SUCCESS)
		err = -EINVAL;
	...

/* ehea_vlan_rx_kill_vid() */
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	...
		err = -ENOMEM;
	...
	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, ..., cb1);
	if (hret != H_SUCCESS) {
		err = -EINVAL;
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, ..., cb1);
	if (hret != H_SUCCESS)
		err = -EINVAL;
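/*
 * The VLAN filter is a 4096-bit bitmap stored as 64 u64 words:
 * index = vid / 64 picks the word, and the constant's MSB shifted right
 * by (vid & 0x3F) picks the bit, MSB-first.  E.g. vid 5 sets bit
 * 0x0400000000000000 in word 0; vid 64 sets the MSB of word 1.
 */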
/* ehea_activate_qp() */
	int ret = -EIO;
	...
		ret = -ENOMEM;
	...
	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (1)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     ...);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (1)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	...
	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     ...);
	...
	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	...
	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     ...);
	...
	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	...
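/*
 * QP bring-up is a strict state ladder driven through modify/query hcall
 * pairs: INITIALIZED -> ENABLED|INITIALIZED -> ENABLED|RDY2SND, with a
 * query after each step to confirm the transition before the next one.
 */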
static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
{
	...
	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		goto out_kill_eq;
	}
	...
	pr_cfg_small_rx.max_entries_rcq = 1;
	...
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	...
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
	...
out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);
out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	...
}

/* ehea_clean_all_portres() */
	int ret = 0;
	...
	for (i = 0; i < port->num_def_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;

/* ehea_remove_adapter_mr() */
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);

/* ehea_add_adapter_mr() */
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
/* ehea_up() */
	struct ehea_port *port = netdev_priv(dev);
	...
	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps);
	if (ret)
		goto out;

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret)
		goto out_clean_pr;
	...
	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret)
			goto out_free_irqs;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret)
			goto out_free_irqs;
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;
	...
out_clean_pr:
	ehea_clean_all_portres(port);
	...

static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_enable(&port->port_res[i].napi);
}

/* ehea_open() */
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	netif_info(port, ifup, dev, "enabling port\n");
	...
		port_napi_enable(port);
	...
	mutex_unlock(&port->port_lock);
	schedule_delayed_work(&port->stats_work, ...);
	...

/* ehea_down() */
	struct ehea_port *port = netdev_priv(dev);
	...
	if (port->state == EHEA_PORT_DOWN)
		return 0;
	...
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	...
	port->state = EHEA_PORT_DOWN;
	...
	ret = ehea_clean_all_portres(port);
	...

/* ehea_stop() */
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifdown, dev, "disabling port\n");

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	mutex_lock(&port->port_lock);
	...
	port_napi_disable(port);
	...
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
/* ehea_purge_sq() */
	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

/* ehea_flush_sq() */
	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		...
		ret = wait_event_timeout(port->swqe_avail_wq,
					 atomic_read(&pr->swqe_avail) >= swqe_max,
					 ...);
		...
	}
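/*
 * Before memory re-registration every in-flight send must drain:
 * ehea_purge_sq() marks all swqes PURGE so the HW discards them, and
 * ehea_flush_sq() sleeps on swqe_avail_wq (woken from ehea_proc_cqes())
 * until swqe_avail shows the send ring empty, modulo the two reserved
 * slots and any unsignalled low-latency swqes still outstanding.
 */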
/* ehea_stop_qps() */
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	...
	int ret = -EIO;
	...
		ret = -ENOMEM;
	...
	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    ..., cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, ...);
		if (hret != H_SUCCESS) {
			pr_err("modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    ..., cb0);
		...
	}
	...

/* ehea_update_rqs() */
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	...
	u32 lkey = pr->recv_mr.lkey;
	...
	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
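/*
 * After the kernel MR is re-registered its lkey changes, so every rwqe
 * still posted on RQ2/RQ3 must be rewritten in place: same skb, same
 * ring slot (recovered from the wr_id index bits), but with the new
 * l_key and a freshly translated buffer address.
 */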
/* ehea_restart_qps() */
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	...
		return -ENOMEM;
	...
	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;
		...
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    ..., cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (1)\n");
			ret = -EFAULT;
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     ..., cb0, ...);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "modify_ehea_qp failed (1)\n");
			ret = -EFAULT;
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    ..., cb0);
		if (hret != H_SUCCESS) {
			ret = -EFAULT;
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		...
	}
	...

/* ehea_reset_port() */
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;
	...
	port->resets++;
	mutex_lock(&port->port_lock);
	...
	port_napi_disable(port);
	...
	netif_info(port, timer, dev, "reset successful\n");
	...
	port_napi_enable(port);
	...
	mutex_unlock(&port->port_lock);
/* ehea_rereg_mrs(): quiesce all ports, then drop the kernel MR */
	pr_info("LPAR memory changed - re-initializing driver\n");
	...
	if (adapter->active_ports) {
		/* Shutdown all ports */
		for (i = 0; i < EHEA_MAX_PORTS; i++) {
			struct ehea_port *port = adapter->port[i];
			struct net_device *dev;

			if (!port)
				continue;

			dev = port->netdev;

			if (dev->flags & IFF_UP) {
				mutex_lock(&port->port_lock);
				...
				ehea_flush_sq(port);
				ret = ehea_stop_qps(dev);
				if (ret) {
					mutex_unlock(&port->port_lock);
					goto out;
				}
				port_napi_disable(port);
				mutex_unlock(&port->port_lock);
			}
			reset_sq_restart_flag(port);
		}

		/* Unregister old memory region */
		ret = ehea_rem_mr(&adapter->mr);
		if (ret) {
			pr_err("unregister MR failed - driver inoperable!\n");
			goto out;
		}
	}
	...
/* ehea_rereg_mrs(): re-register the MR and restart every port */
	if (adapter->active_ports) {
		ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
		if (ret) {
			pr_err("register MR failed - driver inoperable!\n");
			goto out;
		}

		for (i = 0; i < EHEA_MAX_PORTS; i++) {
			struct ehea_port *port = adapter->port[i];

			if (port) {
				struct net_device *dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					ret = ehea_restart_qps(dev);
					if (!ret) {
						check_sqs(port);
						port_napi_enable(port);
						...
					}
					mutex_unlock(&port->port_lock);
				}
			}
		}
	}
	pr_info("re-initializing driver complete\n");
	...

/* ehea_tx_watchdog() */
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &port->flags))
		ehea_schedule_port_reset(port);
/* ehea_sense_adapter_attr() */
	...
		ret = -ENOMEM;
	...
	hret = ehea_h_query_ehea(adapter->handle, cb);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;
	...

/* ehea_get_jumboframe_status() */
	...
		ret = -ENOMEM;
	...
	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB4, ..., cb4);
	if (hret == H_SUCCESS) {
		if (cb4->jumbo_frame)
			*jumbo = 1;
		else {
			cb4->jumbo_frame = 1;
			hret = ehea_h_modify_ehea_port(port->adapter->handle,
						       port->logical_port_id,
						       H_PORT_CB4, ..., cb4);
			if (hret == H_SUCCESS)
				*jumbo = 1;
		}
	} else
		ret = -EINVAL;
	...

/* log_port_id_show() (sysfs attribute) */
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);

/* logical_port_release() */
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		...
		put_device(&port->ofdev.dev);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret)
		goto out_unreg_of_dev;

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}
2947 struct ehea_port *port; in ehea_setup_single_port() local
2951 /* allocate memory for the port structures */ in ehea_setup_single_port()
2955 ret = -ENOMEM; in ehea_setup_single_port()
2959 port = netdev_priv(dev); in ehea_setup_single_port()
2961 mutex_init(&port->port_lock); in ehea_setup_single_port()
2962 port->state = EHEA_PORT_DOWN; in ehea_setup_single_port()
2963 port->sig_comp_iv = sq_entries / 10; in ehea_setup_single_port()
2965 port->adapter = adapter; in ehea_setup_single_port()
2966 port->netdev = dev; in ehea_setup_single_port()
2967 port->logical_port_id = logical_port_id; in ehea_setup_single_port()
2969 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT); in ehea_setup_single_port()
2971 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL); in ehea_setup_single_port()
2972 if (!port->mc_list) { in ehea_setup_single_port()
2973 ret = -ENOMEM; in ehea_setup_single_port()
2977 INIT_LIST_HEAD(&port->mc_list->list); in ehea_setup_single_port()
2979 ret = ehea_sense_port_attr(port); in ehea_setup_single_port()
2983 netif_set_real_num_rx_queues(dev, port->num_def_qps); in ehea_setup_single_port()
2984 netif_set_real_num_tx_queues(dev, port->num_def_qps); in ehea_setup_single_port()
2986 port_dev = ehea_register_port(port, dn); in ehea_setup_single_port()
2993 eth_hw_addr_set(dev, (u8 *)&port->mac_addr); in ehea_setup_single_port()
2995 dev->netdev_ops = &ehea_netdev_ops; in ehea_setup_single_port()
2998 dev->hw_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX; in ehea_setup_single_port()
3000 dev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM; in ehea_setup_single_port()
3004 dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM; in ehea_setup_single_port()
3006 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; in ehea_setup_single_port()
3008 /* MTU range: 68 - 9022 */ in ehea_setup_single_port()
3009 dev->min_mtu = ETH_MIN_MTU; in ehea_setup_single_port()
3010 dev->max_mtu = EHEA_MAX_PACKET_SIZE; in ehea_setup_single_port()
3012 INIT_WORK(&port->reset_task, ehea_reset_port); in ehea_setup_single_port()
3013 INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats); in ehea_setup_single_port()
3015 init_waitqueue_head(&port->swqe_avail_wq); in ehea_setup_single_port()
3016 init_waitqueue_head(&port->restart_wq); in ehea_setup_single_port()
3024 ret = ehea_get_jumboframe_status(port, &jumbo); in ehea_setup_single_port()
3029 netdev_info(dev, "Jumbo frames are %sabled\n", jumbo == 1 ? "en" : "dis"); in ehea_setup_single_port()
3031 adapter->active_ports++; in ehea_setup_single_port()
3033 return port; in ehea_setup_single_port()
3036 ehea_unregister_port(port); in ehea_setup_single_port()
3039 kfree(port->mc_list); in ehea_setup_single_port()
3045 pr_err("setting up logical port with id=%d failed, ret=%d\n", logical_port_id, ret); in ehea_setup_single_port()
3050 static void ehea_shutdown_single_port(struct ehea_port *port) in ehea_shutdown_single_port() argument
3052 struct ehea_adapter *adapter = port->adapter; in ehea_shutdown_single_port()
3054 cancel_work_sync(&port->reset_task); in ehea_shutdown_single_port()
3055 cancel_delayed_work_sync(&port->stats_work); in ehea_shutdown_single_port()
3056 unregister_netdev(port->netdev); in ehea_shutdown_single_port()
3057 ehea_unregister_port(port); in ehea_shutdown_single_port()
3058 kfree(port->mc_list); in ehea_shutdown_single_port()
3059 free_netdev(port->netdev); in ehea_shutdown_single_port()
3060 adapter->active_ports--; in ehea_shutdown_single_port()
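/*
 * Editor's note: the teardown order in ehea_shutdown_single_port() matters.
 * Deferred work (reset_task, stats_work) is cancelled first so a queued
 * port reset cannot race the unregister, and free_netdev() must come last
 * because struct ehea_port lives in the netdev's private area.
 */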
3071 lhea_dn = adapter->ofdev->dev.of_node; in ehea_setup_ports()
3073 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", NULL); in ehea_setup_ports()
3083 return -EIO; in ehea_setup_ports()
3086 adapter->port[i] = ehea_setup_single_port(adapter, in ehea_setup_ports()
3089 if (adapter->port[i]) in ehea_setup_ports()
3090 netdev_info(adapter->port[i]->netdev, in ehea_setup_ports()
3091 "logical port id #%d\n", *dn_log_port_id); in ehea_setup_ports()
3107 lhea_dn = adapter->ofdev->dev.of_node; in ehea_get_eth_dn()
3109 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", NULL); in ehea_get_eth_dn()
3124 struct ehea_port *port; in probe_port_store() local
3132 port = ehea_get_port(adapter, logical_port_id); in probe_port_store()
3134 if (port) { in probe_port_store()
3135 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n", logical_port_id); in probe_port_store()
3137 return -EINVAL; in probe_port_store()
3143 pr_info("no logical port with id %d found\n", logical_port_id); in probe_port_store()
3144 return -EINVAL; in probe_port_store()
3150 return -EIO; in probe_port_store()
3153 port = ehea_setup_single_port(adapter, logical_port_id, eth_dn); in probe_port_store()
3157 if (port) { in probe_port_store()
3159 if (!adapter->port[i]) { in probe_port_store()
3160 adapter->port[i] = port; in probe_port_store()
3164 netdev_info(port->netdev, "added: (logical port id=%d)\n", logical_port_id); in probe_port_store()
3168 return -EIO; in probe_port_store()
3179 struct ehea_port *port; in remove_port_store() local
3185 port = ehea_get_port(adapter, logical_port_id); in remove_port_store()
3187 if (port) { in remove_port_store()
3188 netdev_info(port->netdev, "removed: (logical port id=%d)\n", logical_port_id); in remove_port_store()
3191 ehea_shutdown_single_port(port); in remove_port_store()
3194 if (adapter->port[i] == port) { in remove_port_store()
3195 adapter->port[i] = NULL; in remove_port_store()
3199 pr_err("removing port with logical port id=%d failed. port not configured.\n", logical_port_id); in remove_port_store()
3201 return -EINVAL; in remove_port_store()
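/*
 * Editor's note: probe_port_store()/remove_port_store() back the two sysfs
 * attributes created just below, allowing ports to be added or removed at
 * runtime by writing a logical port id (device path assumed):
 *
 *	echo 2 > /sys/bus/ibmebus/devices/<adapter>/probe_port
 *	echo 2 > /sys/bus/ibmebus/devices/<adapter>/remove_port
 */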
3214 int ret = device_create_file(&dev->dev, &dev_attr_probe_port); in ehea_create_device_sysfs()
3218 ret = device_create_file(&dev->dev, &dev_attr_remove_port); in ehea_create_device_sysfs()
3225 device_remove_file(&dev->dev, &dev_attr_probe_port); in ehea_remove_device_sysfs()
3226 device_remove_file(&dev->dev, &dev_attr_remove_port); in ehea_remove_device_sysfs()
3254 fallthrough; /* re-add canceled memory block */ in ehea_mem_notifier()
3259 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) in ehea_mem_notifier()
3267 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) in ehea_mem_notifier()
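/*
 * Editor's sketch: the cases above belong to a memory-hotplug notifier that
 * keeps the driver's section bitmap in step with DLPAR events, after which
 * the memory regions are re-registered (see ehea_rereg_mrs() above).
 * Assumed skeleton around the excerpted lines:
 */
static int example_mem_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		fallthrough;	/* re-add canceled memory block */
	case MEM_ONLINE:
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			return NOTIFY_BAD;
		break;
	case MEM_GOING_OFFLINE:
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			return NOTIFY_BAD;
		break;
	}
	return NOTIFY_OK;
}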
3314 if (atomic_inc_return(&ehea_memory_hooks_registered) > 1) in ehea_register_memory_hooks()
3375 if (!dev || !dev->dev.of_node) { in ehea_probe_adapter()
3377 return -EINVAL; in ehea_probe_adapter()
3380 adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL); in ehea_probe_adapter()
3382 ret = -ENOMEM; in ehea_probe_adapter()
3383 dev_err(&dev->dev, "no mem for ehea_adapter\n"); in ehea_probe_adapter()
3387 list_add(&adapter->list, &adapter_list); in ehea_probe_adapter()
3389 adapter->ofdev = dev; in ehea_probe_adapter()
3391 adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle", NULL); in ehea_probe_adapter()
3394 adapter->handle = *adapter_handle; in ehea_probe_adapter()
3396 if (!adapter->handle) { in ehea_probe_adapter()
3397 dev_err(&dev->dev, "failed getting handle for adapter '%pOF'\n", in ehea_probe_adapter()
3398 dev->dev.of_node); in ehea_probe_adapter()
3399 ret = -ENODEV; in ehea_probe_adapter()
3403 adapter->pd = EHEA_PD_ID; in ehea_probe_adapter()
3412 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret); in ehea_probe_adapter()
3416 adapter->neq = ehea_create_eq(adapter, in ehea_probe_adapter()
3417 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1); in ehea_probe_adapter()
3418 if (!adapter->neq) { in ehea_probe_adapter()
3419 ret = -EIO; in ehea_probe_adapter()
3420 dev_err(&dev->dev, "NEQ creation failed\n"); in ehea_probe_adapter()
3424 tasklet_setup(&adapter->neq_tasklet, ehea_neq_tasklet); in ehea_probe_adapter()
3432 dev_err(&dev->dev, "setup_ports failed\n"); in ehea_probe_adapter()
3436 ret = ibmebus_request_irq(adapter->neq->attr.ist1, ehea_interrupt_neq, 0, "ehea_neq", adapter); in ehea_probe_adapter()
3440 dev_err(&dev->dev, "requesting NEQ IRQ failed\n"); in ehea_probe_adapter()
3445 tasklet_hi_schedule(&adapter->neq_tasklet); in ehea_probe_adapter()
3452 if (adapter->port[i]) { in ehea_probe_adapter()
3453 ehea_shutdown_single_port(adapter->port[i]); in ehea_probe_adapter()
3454 adapter->port[i] = NULL; in ehea_probe_adapter()
3461 ehea_destroy_eq(adapter->neq); in ehea_probe_adapter()
3464 list_del(&adapter->list); in ehea_probe_adapter()
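/*
 * Editor's note: the probe failure path above unwinds in reverse order of
 * construction -- ports first, then the notification EQ, then the
 * adapter-list entry (the goto labels themselves are not shown in this
 * excerpt):
 *
 *	ports: ehea_shutdown_single_port() per created port
 *	NEQ:   ehea_destroy_eq(adapter->neq)
 *	list:  list_del(&adapter->list)
 */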
3478 if (adapter->port[i]) { in ehea_remove()
3479 ehea_shutdown_single_port(adapter->port[i]); in ehea_remove()
3480 adapter->port[i] = NULL; in ehea_remove()
3485 ibmebus_free_irq(adapter->neq->attr.ist1, adapter); in ehea_remove()
3486 tasklet_kill(&adapter->neq_tasklet); in ehea_remove()
3488 ehea_destroy_eq(adapter->neq); in ehea_remove()
3490 list_del(&adapter->list); in ehea_remove()
3502 ret = -EINVAL; in check_module_parm()
3507 ret = -EINVAL; in check_module_parm()
3512 ret = -EINVAL; in check_module_parm()
3517 ret = -EINVAL; in check_module_parm()
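/*
 * Editor's sketch: each ret = -EINVAL above rejects one out-of-range module
 * parameter. Legal queue sizes are of the form 2^x - 1 with x in [7..14]
 * (per the MODULE_PARM_DESC strings), so a per-parameter check of this
 * shape is assumed; the helper name and limit macros are illustrative.
 */
static int example_check_entries(int entries, const char *name)
{
	/* EHEA_MIN_ENTRIES_QP = 2^7 - 1, EHEA_MAX_ENTRIES_QP = 2^14 - 1 */
	if (entries < EHEA_MIN_ENTRIES_QP || entries > EHEA_MAX_ENTRIES_QP) {
		pr_info("Bad parameter: %s\n", name);
		return -EINVAL;
	}
	return 0;
}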