1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright IBM Corp. 2007, 2009
4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
5 * Frank Pavlic <fpavlic@de.ibm.com>,
6 * Thomas Spatzier <tspat@de.ibm.com>,
7 * Frank Blaschka <frank.blaschka@de.ibm.com>
8 */
9
10 #define KMSG_COMPONENT "qeth"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/kernel.h>
18 #include <linux/slab.h>
19 #include <linux/etherdevice.h>
20 #include <linux/if_bridge.h>
21 #include <linux/list.h>
22 #include <linux/hash.h>
23 #include <linux/hashtable.h>
24 #include <net/switchdev.h>
25 #include <asm/chsc.h>
26 #include <asm/css_chars.h>
27 #include <asm/setup.h>
28 #include "qeth_core.h"
29 #include "qeth_l2.h"
30
31 static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
32 {
33 int rc;
34
35 if (retcode)
36 QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
37 switch (retcode) {
38 case IPA_RC_SUCCESS:
39 rc = 0;
40 break;
41 case IPA_RC_L2_UNSUPPORTED_CMD:
42 rc = -EOPNOTSUPP;
43 break;
44 case IPA_RC_L2_ADDR_TABLE_FULL:
45 rc = -ENOSPC;
46 break;
47 case IPA_RC_L2_DUP_MAC:
48 case IPA_RC_L2_DUP_LAYER3_MAC:
49 rc = -EADDRINUSE;
50 break;
51 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
52 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
53 rc = -EADDRNOTAVAIL;
54 break;
55 case IPA_RC_L2_MAC_NOT_FOUND:
56 rc = -ENOENT;
57 break;
58 default:
59 rc = -EIO;
60 break;
61 }
62 return rc;
63 }
64
65 static int qeth_l2_send_setdelmac_cb(struct qeth_card *card,
66 struct qeth_reply *reply,
67 unsigned long data)
68 {
69 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
70
71 return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code);
72 }
73
74 static int qeth_l2_send_setdelmac(struct qeth_card *card, const __u8 *mac,
75 enum qeth_ipa_cmds ipacmd)
76 {
77 struct qeth_ipa_cmd *cmd;
78 struct qeth_cmd_buffer *iob;
79
80 QETH_CARD_TEXT(card, 2, "L2sdmac");
81 iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
82 IPA_DATA_SIZEOF(setdelmac));
83 if (!iob)
84 return -ENOMEM;
85 cmd = __ipa_cmd(iob);
86 cmd->data.setdelmac.mac_length = ETH_ALEN;
87 ether_addr_copy(cmd->data.setdelmac.mac, mac);
88 return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL);
89 }
90
91 static int qeth_l2_send_setmac(struct qeth_card *card, const __u8 *mac)
92 {
93 int rc;
94
95 QETH_CARD_TEXT(card, 2, "L2Setmac");
96 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
97 if (rc == 0) {
98 dev_info(&card->gdev->dev,
99 "MAC address %pM successfully registered\n", mac);
100 } else {
101 switch (rc) {
102 case -EADDRINUSE:
103 dev_warn(&card->gdev->dev,
104 "MAC address %pM already exists\n", mac);
105 break;
106 case -EADDRNOTAVAIL:
107 dev_warn(&card->gdev->dev,
108 "MAC address %pM is not authorized\n", mac);
109 break;
110 }
111 }
112 return rc;
113 }
114
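/* Register an additional MAC address for RX on the device, using SETGMAC
 * for multicast and SETVMAC for unicast addresses.
 */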
115 static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
116 {
117 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
118 IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
119 int rc;
120
121 QETH_CARD_TEXT(card, 2, "L2Wmac");
122 rc = qeth_l2_send_setdelmac(card, mac, cmd);
123 if (rc == -EADDRINUSE)
124 QETH_DBF_MESSAGE(2, "MAC address %012llx is already registered on device %x\n",
125 ether_addr_to_u64(mac), CARD_DEVID(card));
126 else if (rc)
127 QETH_DBF_MESSAGE(2, "Failed to register MAC address %012llx on device %x: %d\n",
128 ether_addr_to_u64(mac), CARD_DEVID(card), rc);
129 return rc;
130 }
131
132 static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
133 {
134 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
135 IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
136 int rc;
137
138 QETH_CARD_TEXT(card, 2, "L2Rmac");
139 rc = qeth_l2_send_setdelmac(card, mac, cmd);
140 if (rc)
141 QETH_DBF_MESSAGE(2, "Failed to delete MAC address %012llx on device %x: %d\n",
142 ether_addr_to_u64(mac), CARD_DEVID(card), rc);
143 return rc;
144 }
145
146 static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
147 {
148 struct qeth_mac *mac;
149 struct hlist_node *tmp;
150 int i;
151
152 hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
153 hash_del(&mac->hnode);
154 kfree(mac);
155 }
156 }
157
158 static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
159 struct qeth_hdr *hdr, struct sk_buff *skb,
160 __be16 proto, unsigned int data_len)
161 {
162 int cast_type = qeth_get_ether_cast_type(skb);
163 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
164
165 hdr->hdr.l2.pkt_length = data_len;
166
167 if (skb_is_gso(skb)) {
168 hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO;
169 } else {
170 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
171 if (skb->ip_summed == CHECKSUM_PARTIAL)
172 qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], proto);
173 }
174
175 /* set byte 3 to casting flags */
176 if (cast_type == RTN_MULTICAST)
177 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
178 else if (cast_type == RTN_BROADCAST)
179 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
180 else
181 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
182
183 /* VSWITCH relies on the VLAN
184 * information to be present in
185 * the QDIO header */
186 if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
187 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
188 hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
189 }
190 }
191
192 static int qeth_l2_setdelvlan_makerc(struct qeth_card *card, u16 retcode)
193 {
194 if (retcode)
195 QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
196
197 switch (retcode) {
198 case IPA_RC_SUCCESS:
199 return 0;
200 case IPA_RC_L2_INVALID_VLAN_ID:
201 return -EINVAL;
202 case IPA_RC_L2_DUP_VLAN_ID:
203 return -EEXIST;
204 case IPA_RC_L2_VLAN_ID_NOT_FOUND:
205 return -ENOENT;
206 case IPA_RC_L2_VLAN_ID_NOT_ALLOWED:
207 return -EPERM;
208 default:
209 return -EIO;
210 }
211 }
212
213 static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
214 struct qeth_reply *reply,
215 unsigned long data)
216 {
217 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
218
219 QETH_CARD_TEXT(card, 2, "L2sdvcb");
220 if (cmd->hdr.return_code) {
221 QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n",
222 cmd->data.setdelvlan.vlan_id,
223 CARD_DEVID(card), cmd->hdr.return_code);
224 QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
225 }
226 return qeth_l2_setdelvlan_makerc(card, cmd->hdr.return_code);
227 }
228
229 static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
230 enum qeth_ipa_cmds ipacmd)
231 {
232 struct qeth_ipa_cmd *cmd;
233 struct qeth_cmd_buffer *iob;
234
235 QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
236 iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
237 IPA_DATA_SIZEOF(setdelvlan));
238 if (!iob)
239 return -ENOMEM;
240 cmd = __ipa_cmd(iob);
241 cmd->data.setdelvlan.vlan_id = i;
242 return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelvlan_cb, NULL);
243 }
244
245 static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
246 __be16 proto, u16 vid)
247 {
248 struct qeth_card *card = dev->ml_priv;
249
250 QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
251 if (!vid)
252 return 0;
253
254 return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
255 }
256
257 static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
258 __be16 proto, u16 vid)
259 {
260 struct qeth_card *card = dev->ml_priv;
261
262 QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
263 if (!vid)
264 return 0;
265
266 return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
267 }
268
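/* Switch the card's PNSO mode under the ccw device lock. When notifications
 * are turned off, drain the event workqueue so that no stale address-change
 * events get processed afterwards.
 */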
269 static void qeth_l2_set_pnso_mode(struct qeth_card *card,
270 enum qeth_pnso_mode mode)
271 {
272 spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
273 WRITE_ONCE(card->info.pnso_mode, mode);
274 spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
275
276 if (mode == QETH_PNSO_NONE)
277 drain_workqueue(card->event_wq);
278 }
279
280 static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
281 {
282 struct switchdev_notifier_fdb_info info = {};
283
284 QETH_CARD_TEXT(card, 2, "fdbflush");
285
286 info.addr = NULL;
287 /* flush all VLANs: */
288 info.vid = 0;
289 info.added_by_user = false;
290 info.offloaded = true;
291
292 call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
293 card->dev, &info.info, NULL);
294 }
295
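/* Obtain an initial MAC address for the interface: try the z/VM MAC service
 * first (when running under z/VM), then the READ_MAC adapter command, and
 * finally fall back to a random address. OSM and OSX devices do not support
 * a custom MAC address and fail instead.
 */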
296 static int qeth_l2_request_initial_mac(struct qeth_card *card)
297 {
298 int rc = 0;
299
300 QETH_CARD_TEXT(card, 2, "l2reqmac");
301
302 if (MACHINE_IS_VM) {
303 rc = qeth_vm_request_mac(card);
304 if (!rc)
305 goto out;
306 QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
307 CARD_DEVID(card), rc);
308 QETH_CARD_TEXT_(card, 2, "err%04x", rc);
309 /* fall back to alternative mechanism: */
310 }
311
312 rc = qeth_setadpparms_change_macaddr(card);
313 if (!rc)
314 goto out;
315 QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
316 CARD_DEVID(card), rc);
317 QETH_CARD_TEXT_(card, 2, "1err%04x", rc);
318
319 /* Fall back once more, but some devices don't support a custom MAC
320 * address:
321 */
322 if (IS_OSM(card) || IS_OSX(card))
323 return (rc) ? rc : -EADDRNOTAVAIL;
324 eth_hw_addr_random(card->dev);
325
326 out:
327 QETH_CARD_HEX(card, 2, card->dev->dev_addr, card->dev->addr_len);
328 return 0;
329 }
330
331 static void qeth_l2_register_dev_addr(struct qeth_card *card)
332 {
333 if (!is_valid_ether_addr(card->dev->dev_addr))
334 qeth_l2_request_initial_mac(card);
335
336 if (!qeth_l2_send_setmac(card, card->dev->dev_addr))
337 card->info.dev_addr_is_registered = 1;
338 else
339 card->info.dev_addr_is_registered = 0;
340 }
341
342 static int qeth_l2_validate_addr(struct net_device *dev)
343 {
344 struct qeth_card *card = dev->ml_priv;
345
346 if (card->info.dev_addr_is_registered)
347 return eth_validate_addr(dev);
348
349 QETH_CARD_TEXT(card, 4, "nomacadr");
350 return -EPERM;
351 }
352
353 static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
354 {
355 struct sockaddr *addr = p;
356 struct qeth_card *card = dev->ml_priv;
357 u8 old_addr[ETH_ALEN];
358 int rc = 0;
359
360 QETH_CARD_TEXT(card, 3, "setmac");
361
362 if (IS_OSM(card) || IS_OSX(card)) {
363 QETH_CARD_TEXT(card, 3, "setmcTYP");
364 return -EOPNOTSUPP;
365 }
366 QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
367 if (!is_valid_ether_addr(addr->sa_data))
368 return -EADDRNOTAVAIL;
369
370 /* don't register the same address twice */
371 if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
372 card->info.dev_addr_is_registered)
373 return 0;
374
375 /* add the new address, switch over, drop the old */
376 rc = qeth_l2_send_setmac(card, addr->sa_data);
377 if (rc)
378 return rc;
379 ether_addr_copy(old_addr, dev->dev_addr);
380 eth_hw_addr_set(dev, addr->sa_data);
381
382 if (card->info.dev_addr_is_registered)
383 qeth_l2_remove_mac(card, old_addr);
384 card->info.dev_addr_is_registered = 1;
385 return 0;
386 }
387
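/* Reflect the interface's promiscuous mode into a Bridge Port role: enabling
 * promiscuity sets the configured primary or secondary role, disabling it
 * resets the role to none.
 */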
388 static void qeth_l2_promisc_to_bridge(struct qeth_card *card, bool enable)
389 {
390 int role;
391 int rc;
392
393 QETH_CARD_TEXT(card, 3, "pmisc2br");
394
395 if (enable) {
396 if (card->options.sbp.reflect_promisc_primary)
397 role = QETH_SBP_ROLE_PRIMARY;
398 else
399 role = QETH_SBP_ROLE_SECONDARY;
400 } else
401 role = QETH_SBP_ROLE_NONE;
402
403 rc = qeth_bridgeport_setrole(card, role);
404 QETH_CARD_TEXT_(card, 2, "bpm%c%04x", enable ? '+' : '-', rc);
405 if (!rc) {
406 card->options.sbp.role = role;
407 card->info.promisc_mode = enable;
408 }
409 }
410
411 static void qeth_l2_set_promisc_mode(struct qeth_card *card)
412 {
413 bool enable = card->dev->flags & IFF_PROMISC;
414
415 if (card->info.promisc_mode == enable)
416 return;
417
418 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) {
419 qeth_setadp_promisc_mode(card, enable);
420 } else {
421 mutex_lock(&card->sbp_lock);
422 if (card->options.sbp.reflect_promisc)
423 qeth_l2_promisc_to_bridge(card, enable);
424 mutex_unlock(&card->sbp_lock);
425 }
426 }
427
428 /* A new MAC address is added to the hash table and marked to be written to
429  * the card only if it is not already present in the hash table.
430  */
432 static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
433 {
434 u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
435 struct qeth_mac *mac;
436
437 hash_for_each_possible(card->rx_mode_addrs, mac, hnode, mac_hash) {
438 if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
439 mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
440 return;
441 }
442 }
443
444 mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
445 if (!mac)
446 return;
447
448 ether_addr_copy(mac->mac_addr, ha->addr);
449 mac->disp_flag = QETH_DISP_ADDR_ADD;
450
451 hash_add(card->rx_mode_addrs, &mac->hnode, mac_hash);
452 }
453
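/* Worker that syncs the RX mode address cache with the netdev's UC and MC
 * lists: newly listed addresses are registered with the card, addresses that
 * are no longer listed are removed, and promiscuous mode is updated last.
 */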
454 static void qeth_l2_rx_mode_work(struct work_struct *work)
455 {
456 struct qeth_card *card = container_of(work, struct qeth_card,
457 rx_mode_work);
458 struct net_device *dev = card->dev;
459 struct netdev_hw_addr *ha;
460 struct qeth_mac *mac;
461 struct hlist_node *tmp;
462 int i;
463 int rc;
464
465 QETH_CARD_TEXT(card, 3, "setmulti");
466
467 netif_addr_lock_bh(dev);
468 netdev_for_each_mc_addr(ha, dev)
469 qeth_l2_add_mac(card, ha);
470 netdev_for_each_uc_addr(ha, dev)
471 qeth_l2_add_mac(card, ha);
472 netif_addr_unlock_bh(dev);
473
474 hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
475 switch (mac->disp_flag) {
476 case QETH_DISP_ADDR_DELETE:
477 qeth_l2_remove_mac(card, mac->mac_addr);
478 hash_del(&mac->hnode);
479 kfree(mac);
480 break;
481 case QETH_DISP_ADDR_ADD:
482 rc = qeth_l2_write_mac(card, mac->mac_addr);
483 if (rc) {
484 hash_del(&mac->hnode);
485 kfree(mac);
486 break;
487 }
488 fallthrough;
489 default:
490 /* for next call to set_rx_mode(): */
491 mac->disp_flag = QETH_DISP_ADDR_DELETE;
492 }
493 }
494
495 qeth_l2_set_promisc_mode(card);
496 }
497
498 static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
499 struct net_device *dev)
500 {
501 struct qeth_card *card = dev->ml_priv;
502 u16 txq = skb_get_queue_mapping(skb);
503 struct qeth_qdio_out_q *queue;
504 int rc;
505
506 if (!skb_is_gso(skb))
507 qdisc_skb_cb(skb)->pkt_len = skb->len;
508 if (IS_IQD(card))
509 txq = qeth_iqd_translate_txq(dev, txq);
510 queue = card->qdio.out_qs[txq];
511
512 rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
513 qeth_l2_fill_header);
514 if (!rc)
515 return NETDEV_TX_OK;
516
517 QETH_TXQ_STAT_INC(queue, tx_dropped);
518 kfree_skb(skb);
519 return NETDEV_TX_OK;
520 }
521
522 static u16 qeth_l2_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
523 struct net_device *sb_dev)
524 {
525 return qeth_iqd_select_queue(dev, skb, qeth_get_ether_cast_type(skb),
526 sb_dev);
527 }
528
529 static void qeth_l2_set_rx_mode(struct net_device *dev)
530 {
531 struct qeth_card *card = dev->ml_priv;
532
533 schedule_work(&card->rx_mode_work);
534 }
535
536 /**
537 * qeth_l2_pnso() - perform network subchannel operation
538 * @card: qeth_card structure pointer
539 * @oc: Operation Code
540 * @cnc: Boolean Change-Notification Control
541 * @cb: Callback function will be executed for each element
542 * of the address list
543 * @priv: Pointer to pass to the callback function.
544 *
545 * Collects network information in a network address list and calls the
546 * callback function for every entry in the list. If "change-notification-
547 * control" is set, further changes in the address list will be reported
548 * via the IPA command.
549 */
550 static int qeth_l2_pnso(struct qeth_card *card, u8 oc, int cnc,
551 void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry),
552 void *priv)
553 {
554 struct ccw_device *ddev = CARD_DDEV(card);
555 struct chsc_pnso_area *rr;
556 u32 prev_instance = 0;
557 int isfirstblock = 1;
558 int i, size, elems;
559 int rc;
560
561 rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
562 if (rr == NULL)
563 return -ENOMEM;
564 do {
565 QETH_CARD_TEXT(card, 2, "PNSO");
566 /* on the first iteration, naihdr.resume_token will be zero */
567 rc = ccw_device_pnso(ddev, rr, oc, rr->naihdr.resume_token,
568 cnc);
569 if (rc)
570 continue;
571 if (cb == NULL)
572 continue;
573
574 size = rr->naihdr.naids;
575 if (size != sizeof(struct chsc_pnso_naid_l2)) {
576 WARN_ON_ONCE(1);
577 continue;
578 }
579
580 elems = (rr->response.length - sizeof(struct chsc_header) -
581 sizeof(struct chsc_pnso_naihdr)) / size;
582
583 if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
584 /* Inform the caller that they need to scrap */
585 /* the data that was already reported via cb */
586 rc = -EAGAIN;
587 break;
588 }
589 isfirstblock = 0;
590 prev_instance = rr->naihdr.instance;
591 for (i = 0; i < elems; i++)
592 (*cb)(priv, &rr->entries[i]);
593 } while ((rc == -EBUSY) || (!rc && /* list stored */
594 /* resume token is non-zero => list incomplete */
595 (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
596
597 if (rc)
598 QETH_CARD_TEXT_(card, 2, "PNrp%04x", rr->response.code);
599
600 free_page((unsigned long)rr);
601 return rc;
602 }
603
604 static bool qeth_is_my_net_if_token(struct qeth_card *card,
605 struct net_if_token *token)
606 {
607 return ((card->info.ddev_devno == token->devnum) &&
608 (card->info.cssid == token->cssid) &&
609 (card->info.iid == token->iid) &&
610 (card->info.ssid == token->ssid) &&
611 (card->info.chpid == token->chpid) &&
612 (card->info.chid == token->chid));
613 }
614
615 /**
616 * qeth_l2_dev2br_fdb_notify() - update fdb of master bridge
617 * @card: qeth_card structure pointer
618 * @code: event bitmask: high order bit 0x80 set to
619 * 1 - removal of an object
620 * 0 - addition of an object
621 * Object type(s):
622 * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC
623 * @token: "network token" structure identifying 'physical' location
624 * of the target
625 * @addr_lnid: structure with MAC address and VLAN ID of the target
626 */
627 static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
628 struct net_if_token *token,
629 struct mac_addr_lnid *addr_lnid)
630 {
631 struct switchdev_notifier_fdb_info info = {};
632 u8 ntfy_mac[ETH_ALEN];
633
634 ether_addr_copy(ntfy_mac, addr_lnid->mac);
635 /* Ignore VLAN only changes */
636 if (!(code & IPA_ADDR_CHANGE_CODE_MACADDR))
637 return;
638 /* Ignore mcast entries */
639 if (is_multicast_ether_addr(ntfy_mac))
640 return;
641 /* Ignore my own addresses */
642 if (qeth_is_my_net_if_token(card, token))
643 return;
644
645 info.addr = ntfy_mac;
646 /* don't report VLAN IDs */
647 info.vid = 0;
648 info.added_by_user = false;
649 info.offloaded = true;
650
651 if (code & IPA_ADDR_CHANGE_CODE_REMOVAL) {
652 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
653 card->dev, &info.info, NULL);
654 QETH_CARD_TEXT(card, 4, "andelmac");
655 QETH_CARD_TEXT_(card, 4,
656 "mc%012llx", ether_addr_to_u64(ntfy_mac));
657 } else {
658 call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
659 card->dev, &info.info, NULL);
660 QETH_CARD_TEXT(card, 4, "anaddmac");
661 QETH_CARD_TEXT_(card, 4,
662 "mc%012llx", ether_addr_to_u64(ntfy_mac));
663 }
664 }
665
666 static void qeth_l2_dev2br_an_set_cb(void *priv,
667 struct chsc_pnso_naid_l2 *entry)
668 {
669 u8 code = IPA_ADDR_CHANGE_CODE_MACADDR;
670 struct qeth_card *card = priv;
671
672 if (entry->addr_lnid.lnid < VLAN_N_VID)
673 code |= IPA_ADDR_CHANGE_CODE_VLANID;
674 qeth_l2_dev2br_fdb_notify(card, code,
675 (struct net_if_token *)&entry->nit,
676 (struct mac_addr_lnid *)&entry->addr_lnid);
677 }
678
679 /**
680 * qeth_l2_dev2br_an_set() -
681 * Enable or disable 'dev to bridge network address notification'
682 * @card: qeth_card structure pointer
683 * @enable: Enable or disable 'dev to bridge network address notification'
684 *
685 * Returns negative errno-compatible error indication or 0 on success.
686 *
687 * On enable, emits a series of address notifications for all
688 * currently registered hosts.
689 */
690 static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
691 {
692 int rc;
693
694 if (enable) {
695 QETH_CARD_TEXT(card, 2, "anseton");
696 rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 1,
697 qeth_l2_dev2br_an_set_cb, card);
698 if (rc == -EAGAIN)
699 /* address notification enabled, but inconsistent
700 * addresses reported -> disable address notification
701 */
702 qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0,
703 NULL, NULL);
704 } else {
705 QETH_CARD_TEXT(card, 2, "ansetoff");
706 rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL);
707 }
708
709 return rc;
710 }
711
712 struct qeth_l2_br2dev_event_work {
713 struct work_struct work;
714 struct net_device *br_dev;
715 struct net_device *lsync_dev;
716 struct net_device *dst_dev;
717 unsigned long event;
718 unsigned char addr[ETH_ALEN];
719 };
720
721 static const struct net_device_ops qeth_l2_iqd_netdev_ops;
722 static const struct net_device_ops qeth_l2_osa_netdev_ops;
723
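/* Decide whether @netdev must mirror bridge FDB entries learned on @dstdev:
 * only qeth L2 devices with learning_sync enabled qualify, and not when both
 * ports are isolated from each other.
 */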
724 static bool qeth_l2_must_learn(struct net_device *netdev,
725 struct net_device *dstdev)
726 {
727 struct qeth_priv *priv;
728
729 priv = netdev_priv(netdev);
730 return (netdev != dstdev &&
731 (priv->brport_features & BR_LEARNING_SYNC) &&
732 !(br_port_flag_is_set(netdev, BR_ISOLATED) &&
733 br_port_flag_is_set(dstdev, BR_ISOLATED)) &&
734 (netdev->netdev_ops == &qeth_l2_iqd_netdev_ops ||
735 netdev->netdev_ops == &qeth_l2_osa_netdev_ops));
736 }
737
738 /**
739 * qeth_l2_br2dev_worker() - update local MACs
740 * @work: bridge to device FDB update
741 *
742 * Update local MACs of a learning_sync bridgeport so it can receive
743 * messages for a destination port.
744 * In case of an isolated learning_sync port, also update its isolated
745 * siblings.
746 */
747 static void qeth_l2_br2dev_worker(struct work_struct *work)
748 {
749 struct qeth_l2_br2dev_event_work *br2dev_event_work =
750 container_of(work, struct qeth_l2_br2dev_event_work, work);
751 struct net_device *lsyncdev = br2dev_event_work->lsync_dev;
752 struct net_device *dstdev = br2dev_event_work->dst_dev;
753 struct net_device *brdev = br2dev_event_work->br_dev;
754 unsigned long event = br2dev_event_work->event;
755 unsigned char *addr = br2dev_event_work->addr;
756 struct qeth_card *card = lsyncdev->ml_priv;
757 struct net_device *lowerdev;
758 struct list_head *iter;
759 int err = 0;
760
761 QETH_CARD_TEXT_(card, 4, "b2dw%04lx", event);
762 QETH_CARD_TEXT_(card, 4, "ma%012llx", ether_addr_to_u64(addr));
763
764 rcu_read_lock();
765 /* Verify preconditions are still valid: */
766 if (!netif_is_bridge_port(lsyncdev) ||
767 brdev != netdev_master_upper_dev_get_rcu(lsyncdev))
768 goto unlock;
769 if (!qeth_l2_must_learn(lsyncdev, dstdev))
770 goto unlock;
771
772 if (br_port_flag_is_set(lsyncdev, BR_ISOLATED)) {
773 /* Update lsyncdev and its isolated sibling(s): */
774 iter = &brdev->adj_list.lower;
775 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
776 while (lowerdev) {
777 if (br_port_flag_is_set(lowerdev, BR_ISOLATED)) {
778 switch (event) {
779 case SWITCHDEV_FDB_ADD_TO_DEVICE:
780 err = dev_uc_add(lowerdev, addr);
781 break;
782 case SWITCHDEV_FDB_DEL_TO_DEVICE:
783 err = dev_uc_del(lowerdev, addr);
784 break;
785 default:
786 break;
787 }
788 if (err) {
789 QETH_CARD_TEXT(card, 2, "b2derris");
790 QETH_CARD_TEXT_(card, 2,
791 "err%02lx%03d", event,
792 lowerdev->ifindex);
793 }
794 }
795 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
796 }
797 } else {
798 switch (event) {
799 case SWITCHDEV_FDB_ADD_TO_DEVICE:
800 err = dev_uc_add(lsyncdev, addr);
801 break;
802 case SWITCHDEV_FDB_DEL_TO_DEVICE:
803 err = dev_uc_del(lsyncdev, addr);
804 break;
805 default:
806 break;
807 }
808 if (err)
809 QETH_CARD_TEXT_(card, 2, "b2derr%02lx", event);
810 }
811
812 unlock:
813 rcu_read_unlock();
814 dev_put(brdev);
815 dev_put(lsyncdev);
816 dev_put(dstdev);
817 kfree(br2dev_event_work);
818 }
819
820 static int qeth_l2_br2dev_queue_work(struct net_device *brdev,
821 struct net_device *lsyncdev,
822 struct net_device *dstdev,
823 unsigned long event,
824 const unsigned char *addr)
825 {
826 struct qeth_l2_br2dev_event_work *worker_data;
827 struct qeth_card *card;
828
829 worker_data = kzalloc(sizeof(*worker_data), GFP_ATOMIC);
830 if (!worker_data)
831 return -ENOMEM;
832 INIT_WORK(&worker_data->work, qeth_l2_br2dev_worker);
833 worker_data->br_dev = brdev;
834 worker_data->lsync_dev = lsyncdev;
835 worker_data->dst_dev = dstdev;
836 worker_data->event = event;
837 ether_addr_copy(worker_data->addr, addr);
838
839 card = lsyncdev->ml_priv;
840 /* Take a reference on the sw port devices and the bridge */
841 dev_hold(brdev);
842 dev_hold(lsyncdev);
843 dev_hold(dstdev);
844 queue_work(card->event_wq, &worker_data->work);
845 return 0;
846 }
847
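/* Handle switchdev FDB add/del events on any port of a bridge and queue
 * br2dev work for each qeth lower device that needs to learn the address.
 */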
848 /* Called under rtnl_lock */
849 static int qeth_l2_switchdev_event(struct notifier_block *unused,
850 unsigned long event, void *ptr)
851 {
852 struct net_device *dstdev, *brdev, *lowerdev;
853 struct switchdev_notifier_fdb_info *fdb_info;
854 struct switchdev_notifier_info *info = ptr;
855 struct list_head *iter;
856 struct qeth_card *card;
857 int rc;
858
859 if (!(event == SWITCHDEV_FDB_ADD_TO_DEVICE ||
860 event == SWITCHDEV_FDB_DEL_TO_DEVICE))
861 return NOTIFY_DONE;
862
863 dstdev = switchdev_notifier_info_to_dev(info);
864 brdev = netdev_master_upper_dev_get_rcu(dstdev);
865 if (!brdev || !netif_is_bridge_master(brdev))
866 return NOTIFY_DONE;
867 fdb_info = container_of(info,
868 struct switchdev_notifier_fdb_info,
869 info);
870 iter = &brdev->adj_list.lower;
871 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
872 while (lowerdev) {
873 if (qeth_l2_must_learn(lowerdev, dstdev)) {
874 card = lowerdev->ml_priv;
875 QETH_CARD_TEXT_(card, 4, "b2dqw%03lx", event);
876 rc = qeth_l2_br2dev_queue_work(brdev, lowerdev,
877 dstdev, event,
878 fdb_info->addr);
879 if (rc) {
880 QETH_CARD_TEXT(card, 2, "b2dqwerr");
881 return NOTIFY_BAD;
882 }
883 }
884 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
885 }
886 return NOTIFY_DONE;
887 }
888
889 static struct notifier_block qeth_l2_sw_notifier = {
890 .notifier_call = qeth_l2_switchdev_event,
891 };
892
893 static refcount_t qeth_l2_switchdev_notify_refcnt;
894
895 /* Called under rtnl_lock */
896 static void qeth_l2_br2dev_get(void)
897 {
898 int rc;
899
900 if (!refcount_inc_not_zero(&qeth_l2_switchdev_notify_refcnt)) {
901 rc = register_switchdev_notifier(&qeth_l2_sw_notifier);
902 if (rc) {
903 QETH_DBF_MESSAGE(2,
904 "failed to register qeth_l2_sw_notifier: %d\n",
905 rc);
906 } else {
907 refcount_set(&qeth_l2_switchdev_notify_refcnt, 1);
908 QETH_DBF_MESSAGE(2, "qeth_l2_sw_notifier registered\n");
909 }
910 }
911 QETH_DBF_TEXT_(SETUP, 2, "b2d+%04d",
912 qeth_l2_switchdev_notify_refcnt.refs.counter);
913 }
914
915 /* Called under rtnl_lock */
916 static void qeth_l2_br2dev_put(void)
917 {
918 int rc;
919
920 if (refcount_dec_and_test(&qeth_l2_switchdev_notify_refcnt)) {
921 rc = unregister_switchdev_notifier(&qeth_l2_sw_notifier);
922 if (rc) {
923 QETH_DBF_MESSAGE(2,
924 "failed to unregister qeth_l2_sw_notifier: %d\n",
925 rc);
926 } else {
927 QETH_DBF_MESSAGE(2,
928 "qeth_l2_sw_notifier unregistered\n");
929 }
930 }
931 QETH_DBF_TEXT_(SETUP, 2, "b2d-%04d",
932 qeth_l2_switchdev_notify_refcnt.refs.counter);
933 }
934
935 static int qeth_l2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
936 struct net_device *dev, u32 filter_mask,
937 int nlflags)
938 {
939 struct qeth_priv *priv = netdev_priv(dev);
940 struct qeth_card *card = dev->ml_priv;
941 u16 mode = BRIDGE_MODE_UNDEF;
942
943 /* Do not even show qeth devs that cannot do bridge_setlink */
944 if (!priv->brport_hw_features || !netif_device_present(dev) ||
945 qeth_bridgeport_is_in_use(card))
946 return -EOPNOTSUPP;
947
948 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
949 mode, priv->brport_features,
950 priv->brport_hw_features,
951 nlflags, filter_mask, NULL);
952 }
953
954 static const struct nla_policy qeth_brport_policy[IFLA_BRPORT_MAX + 1] = {
955 [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 },
956 };
957
958 /**
959 * qeth_l2_bridge_setlink() - set bridgeport attributes
960 * @dev: netdevice
961 * @nlh: netlink message header
962 * @flags: bridge flags (here: BRIDGE_FLAGS_SELF)
963 * @extack: extended ACK report struct
964 *
965 * Called under rtnl_lock
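 * Userspace typically reaches this via an RTM_SETLINK request carrying
 * IFLA_PROTINFO, e.g. "bridge link set dev <dev> learning_sync on self"
 * (assuming an iproute2 build that supports the learning_sync option).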
966 */
967 static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
968 u16 flags, struct netlink_ext_ack *extack)
969 {
970 struct qeth_priv *priv = netdev_priv(dev);
971 struct nlattr *bp_tb[IFLA_BRPORT_MAX + 1];
972 struct qeth_card *card = dev->ml_priv;
973 struct nlattr *attr, *nested_attr;
974 bool enable, has_protinfo = false;
975 int rem1, rem2;
976 int rc;
977
978 if (!netif_device_present(dev))
979 return -ENODEV;
980
981 nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) {
982 if (nla_type(attr) == IFLA_PROTINFO) {
983 rc = nla_parse_nested(bp_tb, IFLA_BRPORT_MAX, attr,
984 qeth_brport_policy, extack);
985 if (rc)
986 return rc;
987 has_protinfo = true;
988 } else if (nla_type(attr) == IFLA_AF_SPEC) {
989 nla_for_each_nested(nested_attr, attr, rem2) {
990 if (nla_type(nested_attr) == IFLA_BRIDGE_FLAGS)
991 continue;
992 NL_SET_ERR_MSG_ATTR(extack, nested_attr,
993 "Unsupported attribute");
994 return -EINVAL;
995 }
996 } else {
997 NL_SET_ERR_MSG_ATTR(extack, attr, "Unsupported attribute");
998 return -EINVAL;
999 }
1000 }
1001 if (!has_protinfo)
1002 return 0;
1003 if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC])
1004 return -EINVAL;
1005 if (!(priv->brport_hw_features & BR_LEARNING_SYNC)) {
1006 NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
1007 "Operation not supported by HW");
1008 return -EOPNOTSUPP;
1009 }
1010 if (!IS_ENABLED(CONFIG_NET_SWITCHDEV)) {
1011 NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
1012 "Requires NET_SWITCHDEV");
1013 return -EOPNOTSUPP;
1014 }
1015 enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]);
1016
1017 if (enable == !!(priv->brport_features & BR_LEARNING_SYNC))
1018 return 0;
1019
1020 mutex_lock(&card->sbp_lock);
1021 /* do not change anything if BridgePort is enabled */
1022 if (qeth_bridgeport_is_in_use(card)) {
1023 NL_SET_ERR_MSG(extack, "n/a (BridgePort)");
1024 rc = -EBUSY;
1025 } else if (enable) {
1026 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
1027 rc = qeth_l2_dev2br_an_set(card, true);
1028 if (rc) {
1029 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1030 } else {
1031 priv->brport_features |= BR_LEARNING_SYNC;
1032 qeth_l2_br2dev_get();
1033 }
1034 } else {
1035 rc = qeth_l2_dev2br_an_set(card, false);
1036 if (!rc) {
1037 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1038 priv->brport_features ^= BR_LEARNING_SYNC;
1039 qeth_l2_dev2br_fdb_flush(card);
1040 qeth_l2_br2dev_put();
1041 }
1042 }
1043 mutex_unlock(&card->sbp_lock);
1044
1045 return rc;
1046 }
1047
1048 static const struct net_device_ops qeth_l2_iqd_netdev_ops = {
1049 .ndo_open = qeth_open,
1050 .ndo_stop = qeth_stop,
1051 .ndo_get_stats64 = qeth_get_stats64,
1052 .ndo_start_xmit = qeth_l2_hard_start_xmit,
1053 .ndo_features_check = qeth_features_check,
1054 .ndo_select_queue = qeth_l2_iqd_select_queue,
1055 .ndo_validate_addr = qeth_l2_validate_addr,
1056 .ndo_set_rx_mode = qeth_l2_set_rx_mode,
1057 .ndo_eth_ioctl = qeth_do_ioctl,
1058 .ndo_siocdevprivate = qeth_siocdevprivate,
1059 .ndo_set_mac_address = qeth_l2_set_mac_address,
1060 .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
1061 .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
1062 .ndo_tx_timeout = qeth_tx_timeout,
1063 .ndo_fix_features = qeth_fix_features,
1064 .ndo_set_features = qeth_set_features,
1065 .ndo_bridge_getlink = qeth_l2_bridge_getlink,
1066 .ndo_bridge_setlink = qeth_l2_bridge_setlink,
1067 };
1068
1069 static const struct net_device_ops qeth_l2_osa_netdev_ops = {
1070 .ndo_open = qeth_open,
1071 .ndo_stop = qeth_stop,
1072 .ndo_get_stats64 = qeth_get_stats64,
1073 .ndo_start_xmit = qeth_l2_hard_start_xmit,
1074 .ndo_features_check = qeth_features_check,
1075 .ndo_select_queue = qeth_osa_select_queue,
1076 .ndo_validate_addr = qeth_l2_validate_addr,
1077 .ndo_set_rx_mode = qeth_l2_set_rx_mode,
1078 .ndo_eth_ioctl = qeth_do_ioctl,
1079 .ndo_siocdevprivate = qeth_siocdevprivate,
1080 .ndo_set_mac_address = qeth_l2_set_mac_address,
1081 .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
1082 .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
1083 .ndo_tx_timeout = qeth_tx_timeout,
1084 .ndo_fix_features = qeth_fix_features,
1085 .ndo_set_features = qeth_set_features,
1086 };
1087
1088 static int qeth_l2_setup_netdev(struct qeth_card *card)
1089 {
1090 card->dev->netdev_ops = IS_IQD(card) ? &qeth_l2_iqd_netdev_ops :
1091 &qeth_l2_osa_netdev_ops;
1092 card->dev->needed_headroom = sizeof(struct qeth_hdr);
1093 card->dev->priv_flags |= IFF_UNICAST_FLT;
1094
1095 if (IS_OSM(card)) {
1096 card->dev->features |= NETIF_F_VLAN_CHALLENGED;
1097 } else {
1098 if (!IS_VM_NIC(card))
1099 card->dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1100 card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1101 }
1102
1103 if (IS_OSD(card) && !IS_VM_NIC(card)) {
1104 card->dev->features |= NETIF_F_SG;
1105 /* OSA 3S and earlier has no RX/TX support */
1106 if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
1107 card->dev->hw_features |= NETIF_F_IP_CSUM;
1108 card->dev->vlan_features |= NETIF_F_IP_CSUM;
1109 }
1110 }
1111 if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
1112 card->dev->hw_features |= NETIF_F_IPV6_CSUM;
1113 card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
1114 }
1115 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) ||
1116 qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) {
1117 card->dev->hw_features |= NETIF_F_RXCSUM;
1118 card->dev->vlan_features |= NETIF_F_RXCSUM;
1119 }
1120 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
1121 card->dev->hw_features |= NETIF_F_TSO;
1122 card->dev->vlan_features |= NETIF_F_TSO;
1123 }
1124 if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
1125 card->dev->hw_features |= NETIF_F_TSO6;
1126 card->dev->vlan_features |= NETIF_F_TSO6;
1127 }
1128
1129 if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1130 card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
1131 netif_keep_dst(card->dev);
1132 netif_set_tso_max_size(card->dev,
1133 PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
1134 }
1135
1136 netif_napi_add(card->dev, &card->napi, qeth_poll);
1137 return register_netdev(card->dev);
1138 }
1139
1140 static void qeth_l2_trace_features(struct qeth_card *card)
1141 {
1142 /* Set BridgePort features */
1143 QETH_CARD_TEXT(card, 2, "featuSBP");
1144 QETH_CARD_HEX(card, 2, &card->options.sbp.supported_funcs,
1145 sizeof(card->options.sbp.supported_funcs));
1146 /* VNIC Characteristics features */
1147 QETH_CARD_TEXT(card, 2, "feaVNICC");
1148 QETH_CARD_HEX(card, 2, &card->options.vnicc.sup_chars,
1149 sizeof(card->options.vnicc.sup_chars));
1150 }
1151
1152 static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
1153 {
1154 if (!card->options.sbp.reflect_promisc &&
1155 card->options.sbp.role != QETH_SBP_ROLE_NONE) {
1156 /* Conditional to avoid spurious error messages */
1157 qeth_bridgeport_setrole(card, card->options.sbp.role);
1158 /* Let the callback function refresh the stored role value. */
1159 qeth_bridgeport_query_ports(card, &card->options.sbp.role,
1160 NULL);
1161 }
1162 if (card->options.sbp.hostnotification) {
1163 if (qeth_bridgeport_an_set(card, 1))
1164 card->options.sbp.hostnotification = 0;
1165 }
1166 }
1167
1168 /**
1169 * qeth_l2_detect_dev2br_support() -
1170 * Detect whether this card supports 'dev to bridge fdb network address
1171 * change notification' and thus can support the learning_sync bridgeport
1172 * attribute
1173 * @card: qeth_card structure pointer
1174 */
1175 static void qeth_l2_detect_dev2br_support(struct qeth_card *card)
1176 {
1177 struct qeth_priv *priv = netdev_priv(card->dev);
1178 bool dev2br_supported;
1179
1180 QETH_CARD_TEXT(card, 2, "d2brsup");
1181 if (!IS_IQD(card))
1182 return;
1183
1184 /* dev2br requires valid cssid,iid,chid */
1185 dev2br_supported = card->info.ids_valid &&
1186 css_general_characteristics.enarf;
1187 QETH_CARD_TEXT_(card, 2, "D2Bsup%02x", dev2br_supported);
1188
1189 if (dev2br_supported)
1190 priv->brport_hw_features |= BR_LEARNING_SYNC;
1191 else
1192 priv->brport_hw_features &= ~BR_LEARNING_SYNC;
1193 }
1194
1195 static void qeth_l2_enable_brport_features(struct qeth_card *card)
1196 {
1197 struct qeth_priv *priv = netdev_priv(card->dev);
1198 int rc;
1199
1200 if (priv->brport_features & BR_LEARNING_SYNC) {
1201 if (priv->brport_hw_features & BR_LEARNING_SYNC) {
1202 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
1203 rc = qeth_l2_dev2br_an_set(card, true);
1204 if (rc == -EAGAIN) {
1205 /* Recoverable error, retry once */
1206 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1207 qeth_l2_dev2br_fdb_flush(card);
1208 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
1209 rc = qeth_l2_dev2br_an_set(card, true);
1210 }
1211 if (rc) {
1212 netdev_err(card->dev,
1213 "failed to enable bridge learning_sync: %d\n",
1214 rc);
1215 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1216 qeth_l2_dev2br_fdb_flush(card);
1217 priv->brport_features ^= BR_LEARNING_SYNC;
1218 }
1219 } else {
1220 dev_warn(&card->gdev->dev,
1221 "bridge learning_sync not supported\n");
1222 priv->brport_features ^= BR_LEARNING_SYNC;
1223 }
1224 }
1225 }
1226
1227 /* SETBRIDGEPORT support, async notifications */
1228
1229 enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset};
1230
1231 /**
1232 * qeth_bridge_emit_host_event() - bridgeport address change notification
1233 * @card: qeth_card structure pointer, for udev events.
1234 * @evtype: "normal" register/unregister, or abort, or reset. For abort
1235 * and reset token and addr_lnid are unused and may be NULL.
1236 * @code: event bitmask: high order bit 0x80 value 1 means removal of an
1237 * object, 0 - addition of an object.
1238 * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC.
1239 * @token: "network token" structure identifying physical address of the port.
1240 * @addr_lnid: pointer to structure with MAC address and VLAN ID.
1241 *
1242 * This function is called when registrations and deregistrations are
1243 * reported by the hardware, and also when notifications are enabled -
1244 * for all currently registered addresses.
1245 */
1246 static void qeth_bridge_emit_host_event(struct qeth_card *card,
1247 enum qeth_an_event_type evtype,
1248 u8 code,
1249 struct net_if_token *token,
1250 struct mac_addr_lnid *addr_lnid)
1251 {
1252 char str[7][32];
1253 char *env[8];
1254 int i = 0;
1255
1256 switch (evtype) {
1257 case anev_reg_unreg:
1258 scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s",
1259 (code & IPA_ADDR_CHANGE_CODE_REMOVAL)
1260 ? "deregister" : "register");
1261 env[i] = str[i]; i++;
1262 if (code & IPA_ADDR_CHANGE_CODE_VLANID) {
1263 scnprintf(str[i], sizeof(str[i]), "VLAN=%d",
1264 addr_lnid->lnid);
1265 env[i] = str[i]; i++;
1266 }
1267 if (code & IPA_ADDR_CHANGE_CODE_MACADDR) {
1268 scnprintf(str[i], sizeof(str[i]), "MAC=%pM",
1269 addr_lnid->mac);
1270 env[i] = str[i]; i++;
1271 }
1272 scnprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x",
1273 token->cssid, token->ssid, token->devnum);
1274 env[i] = str[i]; i++;
1275 scnprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid);
1276 env[i] = str[i]; i++;
1277 scnprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x",
1278 token->chpid);
1279 env[i] = str[i]; i++;
1280 scnprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x",
1281 token->chid);
1282 env[i] = str[i]; i++;
1283 break;
1284 case anev_abort:
1285 scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort");
1286 env[i] = str[i]; i++;
1287 break;
1288 case anev_reset:
1289 scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset");
1290 env[i] = str[i]; i++;
1291 break;
1292 }
1293 env[i] = NULL;
1294 kobject_uevent_env(&card->gdev->dev.kobj, KOBJ_CHANGE, env);
1295 }
1296
1297 struct qeth_bridge_state_data {
1298 struct work_struct worker;
1299 struct qeth_card *card;
1300 u8 role;
1301 u8 state;
1302 };
1303
1304 static void qeth_bridge_state_change_worker(struct work_struct *work)
1305 {
1306 struct qeth_bridge_state_data *data =
1307 container_of(work, struct qeth_bridge_state_data, worker);
1308 char env_locrem[32];
1309 char env_role[32];
1310 char env_state[32];
1311 char *env[] = {
1312 env_locrem,
1313 env_role,
1314 env_state,
1315 NULL
1316 };
1317
1318 scnprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
1319 scnprintf(env_role, sizeof(env_role), "ROLE=%s",
1320 (data->role == QETH_SBP_ROLE_NONE) ? "none" :
1321 (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
1322 (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
1323 "<INVALID>");
1324 scnprintf(env_state, sizeof(env_state), "STATE=%s",
1325 (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
1326 (data->state == QETH_SBP_STATE_STANDBY) ? "standby" :
1327 (data->state == QETH_SBP_STATE_ACTIVE) ? "active" :
1328 "<INVALID>");
1329 kobject_uevent_env(&data->card->gdev->dev.kobj,
1330 KOBJ_CHANGE, env);
1331 kfree(data);
1332 }
1333
1334 static void qeth_bridge_state_change(struct qeth_card *card,
1335 struct qeth_ipa_cmd *cmd)
1336 {
1337 struct qeth_sbp_port_data *qports = &cmd->data.sbp.data.port_data;
1338 struct qeth_bridge_state_data *data;
1339
1340 QETH_CARD_TEXT(card, 2, "brstchng");
1341 if (qports->num_entries == 0) {
1342 QETH_CARD_TEXT(card, 2, "BPempty");
1343 return;
1344 }
1345 if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
1346 QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
1347 return;
1348 }
1349
1350 data = kzalloc(sizeof(*data), GFP_ATOMIC);
1351 if (!data) {
1352 QETH_CARD_TEXT(card, 2, "BPSalloc");
1353 return;
1354 }
1355 INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
1356 data->card = card;
1357 /* Information for the local port: */
1358 data->role = qports->entry[0].role;
1359 data->state = qports->entry[0].state;
1360
1361 queue_work(card->event_wq, &data->worker);
1362 }
1363
1364 struct qeth_addr_change_data {
1365 struct delayed_work dwork;
1366 struct qeth_card *card;
1367 struct qeth_ipacmd_addr_change ac_event;
1368 };
1369
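/* Deferred handler for dev2br address-change events: forwards each entry to
 * the bridge FDB, or - if events were lost - flushes the FDB and restarts
 * address notification under the RTNL lock.
 */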
1370 static void qeth_l2_dev2br_worker(struct work_struct *work)
1371 {
1372 struct delayed_work *dwork = to_delayed_work(work);
1373 struct qeth_addr_change_data *data;
1374 struct qeth_card *card;
1375 struct qeth_priv *priv;
1376 unsigned int i;
1377 int rc;
1378
1379 data = container_of(dwork, struct qeth_addr_change_data, dwork);
1380 card = data->card;
1381 priv = netdev_priv(card->dev);
1382
1383 QETH_CARD_TEXT(card, 4, "dev2brew");
1384
1385 if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
1386 goto free;
1387
1388 if (data->ac_event.lost_event_mask) {
1389 /* Potential re-config in progress, try again later: */
1390 if (!rtnl_trylock()) {
1391 queue_delayed_work(card->event_wq, dwork,
1392 msecs_to_jiffies(100));
1393 return;
1394 }
1395
1396 if (!netif_device_present(card->dev)) {
1397 rtnl_unlock();
1398 goto free;
1399 }
1400
1401 QETH_DBF_MESSAGE(3,
1402 "Address change notification overflow on device %x\n",
1403 CARD_DEVID(card));
1404 /* Card fdb and bridge fdb are out of sync, card has stopped
1405 * notifications (no need to drain_workqueue). Purge all
1406 * 'extern_learn' entries from the parent bridge and restart
1407 * the notifications.
1408 */
1409 qeth_l2_dev2br_fdb_flush(card);
1410 rc = qeth_l2_dev2br_an_set(card, true);
1411 if (rc) {
1412 /* TODO: if we want to retry after -EAGAIN, be
1413 * aware there could be stale entries in the
1414 * workqueue now, that need to be drained.
1415 * For now we give up:
1416 */
1417 netdev_err(card->dev,
1418 "bridge learning_sync failed to recover: %d\n",
1419 rc);
1420 WRITE_ONCE(card->info.pnso_mode,
1421 QETH_PNSO_NONE);
1422 /* To remove fdb entries reported by an_set: */
1423 qeth_l2_dev2br_fdb_flush(card);
1424 priv->brport_features ^= BR_LEARNING_SYNC;
1425 } else {
1426 QETH_DBF_MESSAGE(3,
1427 "Address Notification resynced on device %x\n",
1428 CARD_DEVID(card));
1429 }
1430
1431 rtnl_unlock();
1432 } else {
1433 for (i = 0; i < data->ac_event.num_entries; i++) {
1434 struct qeth_ipacmd_addr_change_entry *entry =
1435 &data->ac_event.entry[i];
1436 qeth_l2_dev2br_fdb_notify(card,
1437 entry->change_code,
1438 &entry->token,
1439 &entry->addr_lnid);
1440 }
1441 }
1442
1443 free:
1444 kfree(data);
1445 }
1446
1447 static void qeth_addr_change_event_worker(struct work_struct *work)
1448 {
1449 struct delayed_work *dwork = to_delayed_work(work);
1450 struct qeth_addr_change_data *data;
1451 struct qeth_card *card;
1452 int i;
1453
1454 data = container_of(dwork, struct qeth_addr_change_data, dwork);
1455 card = data->card;
1456
1457 QETH_CARD_TEXT(data->card, 4, "adrchgew");
1458
1459 if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
1460 goto free;
1461
1462 if (data->ac_event.lost_event_mask) {
1463 /* Potential re-config in progress, try again later: */
1464 if (!mutex_trylock(&card->sbp_lock)) {
1465 queue_delayed_work(card->event_wq, dwork,
1466 msecs_to_jiffies(100));
1467 return;
1468 }
1469
1470 dev_info(&data->card->gdev->dev,
1471 "Address change notification stopped on %s (%s)\n",
1472 netdev_name(card->dev),
1473 (data->ac_event.lost_event_mask == 0x01)
1474 ? "Overflow"
1475 : (data->ac_event.lost_event_mask == 0x02)
1476 ? "Bridge port state change"
1477 : "Unknown reason");
1478
1479 data->card->options.sbp.hostnotification = 0;
1480 card->info.pnso_mode = QETH_PNSO_NONE;
1481 mutex_unlock(&data->card->sbp_lock);
1482 qeth_bridge_emit_host_event(data->card, anev_abort,
1483 0, NULL, NULL);
1484 } else
1485 for (i = 0; i < data->ac_event.num_entries; i++) {
1486 struct qeth_ipacmd_addr_change_entry *entry =
1487 &data->ac_event.entry[i];
1488 qeth_bridge_emit_host_event(data->card,
1489 anev_reg_unreg,
1490 entry->change_code,
1491 &entry->token,
1492 &entry->addr_lnid);
1493 }
1494
1495 free:
1496 kfree(data);
1497 }
1498
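/* Entry point for address-change notifications from the card: copy the event
 * data and defer processing to the event workqueue, picking the bridgeport or
 * dev2br worker based on the current PNSO mode.
 */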
1499 static void qeth_addr_change_event(struct qeth_card *card,
1500 struct qeth_ipa_cmd *cmd)
1501 {
1502 struct qeth_ipacmd_addr_change *hostevs =
1503 &cmd->data.addrchange;
1504 struct qeth_addr_change_data *data;
1505 int extrasize;
1506
1507 if (card->info.pnso_mode == QETH_PNSO_NONE)
1508 return;
1509
1510 QETH_CARD_TEXT(card, 4, "adrchgev");
1511 if (cmd->hdr.return_code != 0x0000) {
1512 if (cmd->hdr.return_code == 0x0010) {
1513 if (hostevs->lost_event_mask == 0x00)
1514 hostevs->lost_event_mask = 0xff;
1515 } else {
1516 QETH_CARD_TEXT_(card, 2, "ACHN%04x",
1517 cmd->hdr.return_code);
1518 return;
1519 }
1520 }
1521 extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) *
1522 hostevs->num_entries;
1523 data = kzalloc(sizeof(struct qeth_addr_change_data) + extrasize,
1524 GFP_ATOMIC);
1525 if (!data) {
1526 QETH_CARD_TEXT(card, 2, "ACNalloc");
1527 return;
1528 }
1529 if (card->info.pnso_mode == QETH_PNSO_BRIDGEPORT)
1530 INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
1531 else
1532 INIT_DELAYED_WORK(&data->dwork, qeth_l2_dev2br_worker);
1533 data->card = card;
1534 data->ac_event = *hostevs;
1535 memcpy(data->ac_event.entry, hostevs->entry, extrasize);
1536 queue_delayed_work(card->event_wq, &data->dwork, 0);
1537 }
1538
1539 /* SETBRIDGEPORT support; sending commands */
1540
1541 struct _qeth_sbp_cbctl {
1542 union {
1543 u32 supported;
1544 struct {
1545 enum qeth_sbp_roles *role;
1546 enum qeth_sbp_states *state;
1547 } qports;
1548 } data;
1549 };
1550
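/* Convert the IPA and SETBRIDGEPORT return codes of a command reply into an
 * errno value, and report configuration conflicts to the user.
 */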
1551 static int qeth_bridgeport_makerc(struct qeth_card *card,
1552 struct qeth_ipa_cmd *cmd)
1553 {
1554 struct qeth_ipacmd_setbridgeport *sbp = &cmd->data.sbp;
1555 enum qeth_ipa_sbp_cmd setcmd = sbp->hdr.command_code;
1556 u16 ipa_rc = cmd->hdr.return_code;
1557 u16 sbp_rc = sbp->hdr.return_code;
1558 int rc;
1559
1560 if (ipa_rc == IPA_RC_SUCCESS && sbp_rc == IPA_RC_SUCCESS)
1561 return 0;
1562
1563 if ((IS_IQD(card) && ipa_rc == IPA_RC_SUCCESS) ||
1564 (!IS_IQD(card) && ipa_rc == sbp_rc)) {
1565 switch (sbp_rc) {
1566 case IPA_RC_SUCCESS:
1567 rc = 0;
1568 break;
1569 case IPA_RC_L2_UNSUPPORTED_CMD:
1570 case IPA_RC_UNSUPPORTED_COMMAND:
1571 rc = -EOPNOTSUPP;
1572 break;
1573 case IPA_RC_SBP_OSA_NOT_CONFIGURED:
1574 case IPA_RC_SBP_IQD_NOT_CONFIGURED:
1575 rc = -ENODEV; /* maybe not the best code here? */
1576 dev_err(&card->gdev->dev,
1577 "The device is not configured as a Bridge Port\n");
1578 break;
1579 case IPA_RC_SBP_OSA_OS_MISMATCH:
1580 case IPA_RC_SBP_IQD_OS_MISMATCH:
1581 rc = -EPERM;
1582 dev_err(&card->gdev->dev,
1583 "A Bridge Port is already configured by a different operating system\n");
1584 break;
1585 case IPA_RC_SBP_OSA_ANO_DEV_PRIMARY:
1586 case IPA_RC_SBP_IQD_ANO_DEV_PRIMARY:
1587 switch (setcmd) {
1588 case IPA_SBP_SET_PRIMARY_BRIDGE_PORT:
1589 rc = -EEXIST;
1590 dev_err(&card->gdev->dev,
1591 "The LAN already has a primary Bridge Port\n");
1592 break;
1593 case IPA_SBP_SET_SECONDARY_BRIDGE_PORT:
1594 rc = -EBUSY;
1595 dev_err(&card->gdev->dev,
1596 "The device is already a primary Bridge Port\n");
1597 break;
1598 default:
1599 rc = -EIO;
1600 }
1601 break;
1602 case IPA_RC_SBP_OSA_CURRENT_SECOND:
1603 case IPA_RC_SBP_IQD_CURRENT_SECOND:
1604 rc = -EBUSY;
1605 dev_err(&card->gdev->dev,
1606 "The device is already a secondary Bridge Port\n");
1607 break;
1608 case IPA_RC_SBP_OSA_LIMIT_SECOND:
1609 case IPA_RC_SBP_IQD_LIMIT_SECOND:
1610 rc = -EEXIST;
1611 dev_err(&card->gdev->dev,
1612 "The LAN cannot have more secondary Bridge Ports\n");
1613 break;
1614 case IPA_RC_SBP_OSA_CURRENT_PRIMARY:
1615 case IPA_RC_SBP_IQD_CURRENT_PRIMARY:
1616 rc = -EBUSY;
1617 dev_err(&card->gdev->dev,
1618 "The device is already a primary Bridge Port\n");
1619 break;
1620 case IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN:
1621 case IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN:
1622 rc = -EACCES;
1623 dev_err(&card->gdev->dev,
1624 "The device is not authorized to be a Bridge Port\n");
1625 break;
1626 default:
1627 rc = -EIO;
1628 }
1629 } else {
1630 switch (ipa_rc) {
1631 case IPA_RC_NOTSUPP:
1632 rc = -EOPNOTSUPP;
1633 break;
1634 case IPA_RC_UNSUPPORTED_COMMAND:
1635 rc = -EOPNOTSUPP;
1636 break;
1637 default:
1638 rc = -EIO;
1639 }
1640 }
1641
1642 if (rc) {
1643 QETH_CARD_TEXT_(card, 2, "SBPi%04x", ipa_rc);
1644 QETH_CARD_TEXT_(card, 2, "SBPc%04x", sbp_rc);
1645 }
1646 return rc;
1647 }
1648
1649 static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card,
1650 enum qeth_ipa_sbp_cmd sbp_cmd,
1651 unsigned int data_length)
1652 {
1653 enum qeth_ipa_cmds ipa_cmd = IS_IQD(card) ? IPA_CMD_SETBRIDGEPORT_IQD :
1654 IPA_CMD_SETBRIDGEPORT_OSA;
1655 struct qeth_ipacmd_sbp_hdr *hdr;
1656 struct qeth_cmd_buffer *iob;
1657
1658 iob = qeth_ipa_alloc_cmd(card, ipa_cmd, QETH_PROT_NONE,
1659 data_length +
1660 offsetof(struct qeth_ipacmd_setbridgeport,
1661 data));
1662 if (!iob)
1663 return iob;
1664
1665 hdr = &__ipa_cmd(iob)->data.sbp.hdr;
1666 hdr->cmdlength = sizeof(*hdr) + data_length;
1667 hdr->command_code = sbp_cmd;
1668 hdr->used_total = 1;
1669 hdr->seq_no = 1;
1670 return iob;
1671 }
1672
1673 static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
1674 struct qeth_reply *reply, unsigned long data)
1675 {
1676 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
1677 struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
1678 int rc;
1679
1680 QETH_CARD_TEXT(card, 2, "brqsupcb");
1681 rc = qeth_bridgeport_makerc(card, cmd);
1682 if (rc)
1683 return rc;
1684
1685 cbctl->data.supported =
1686 cmd->data.sbp.data.query_cmds_supp.supported_cmds;
1687 return 0;
1688 }
1689
1690 /**
1691 * qeth_bridgeport_query_support() - store bitmask of supported subfunctions.
1692 * @card: qeth_card structure pointer.
1693 *
1694 * Sets bitmask of supported setbridgeport subfunctions in the qeth_card
1695 * structure: card->options.sbp.supported_funcs.
1696 */
1697 static void qeth_bridgeport_query_support(struct qeth_card *card)
1698 {
1699 struct qeth_cmd_buffer *iob;
1700 struct _qeth_sbp_cbctl cbctl;
1701
1702 QETH_CARD_TEXT(card, 2, "brqsuppo");
1703 iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED,
1704 SBP_DATA_SIZEOF(query_cmds_supp));
1705 if (!iob)
1706 return;
1707
1708 if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb,
1709 &cbctl)) {
1710 card->options.sbp.role = QETH_SBP_ROLE_NONE;
1711 card->options.sbp.supported_funcs = 0;
1712 return;
1713 }
1714 card->options.sbp.supported_funcs = cbctl.data.supported;
1715 }
1716
1717 static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
1718 struct qeth_reply *reply, unsigned long data)
1719 {
1720 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
1721 struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
1722 struct qeth_sbp_port_data *qports;
1723 int rc;
1724
1725 QETH_CARD_TEXT(card, 2, "brqprtcb");
1726 rc = qeth_bridgeport_makerc(card, cmd);
1727 if (rc)
1728 return rc;
1729
1730 qports = &cmd->data.sbp.data.port_data;
1731 if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
1732 QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
1733 return -EINVAL;
1734 }
1735 /* first entry contains the state of the local port */
1736 if (qports->num_entries > 0) {
1737 if (cbctl->data.qports.role)
1738 *cbctl->data.qports.role = qports->entry[0].role;
1739 if (cbctl->data.qports.state)
1740 *cbctl->data.qports.state = qports->entry[0].state;
1741 }
1742 return 0;
1743 }
1744
1745 /**
1746 * qeth_bridgeport_query_ports() - query local bridgeport status.
1747 * @card: qeth_card structure pointer.
1748 * @role: Role of the port: 0-none, 1-primary, 2-secondary.
1749 * @state: State of the port: 0-inactive, 1-standby, 2-active.
1750 *
1751 * Returns negative errno-compatible error indication or 0 on success.
1752 *
1753 * 'role' and 'state' are not updated in case of hardware operation failure.
1754 */
1755 int qeth_bridgeport_query_ports(struct qeth_card *card,
1756 enum qeth_sbp_roles *role, enum qeth_sbp_states *state)
1757 {
1758 struct qeth_cmd_buffer *iob;
1759 struct _qeth_sbp_cbctl cbctl = {
1760 .data = {
1761 .qports = {
1762 .role = role,
1763 .state = state,
1764 },
1765 },
1766 };
1767
1768 QETH_CARD_TEXT(card, 2, "brqports");
1769 if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
1770 return -EOPNOTSUPP;
1771 iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0);
1772 if (!iob)
1773 return -ENOMEM;
1774
1775 return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
1776 &cbctl);
1777 }
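/* A minimal, hypothetical usage sketch (not part of this driver) showing how
 * a caller such as a sysfs show handler might read the local bridgeport
 * status:
 *
 *	enum qeth_sbp_roles role;
 *	enum qeth_sbp_states state;
 *	int rc;
 *
 *	rc = qeth_bridgeport_query_ports(card, &role, &state);
 *	if (rc)
 *		return rc;
 *	// role and state now hold the local port's role and state
 */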
1778
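/* Callback for the SBP set/reset role sub-commands; only the converted
 * return code is of interest here.
 */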
1779 static int qeth_bridgeport_set_cb(struct qeth_card *card,
1780 struct qeth_reply *reply, unsigned long data)
1781 {
1782 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
1783
1784 QETH_CARD_TEXT(card, 2, "brsetrcb");
1785 return qeth_bridgeport_makerc(card, cmd);
1786 }
1787
1788 /**
1789 * qeth_bridgeport_setrole() - Assign primary role to the port.
1790 * @card: qeth_card structure pointer.
1791 * @role: Role to assign.
1792 *
1793 * Returns negative errno-compatible error indication or 0 on success.
1794 */
1795 int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
1796 {
1797 struct qeth_cmd_buffer *iob;
1798 enum qeth_ipa_sbp_cmd setcmd;
1799 unsigned int cmdlength = 0;
1800
1801 QETH_CARD_TEXT(card, 2, "brsetrol");
1802 switch (role) {
1803 case QETH_SBP_ROLE_NONE:
1804 setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE;
1805 break;
1806 case QETH_SBP_ROLE_PRIMARY:
1807 setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT;
1808 cmdlength = SBP_DATA_SIZEOF(set_primary);
1809 break;
1810 case QETH_SBP_ROLE_SECONDARY:
1811 setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT;
1812 break;
1813 default:
1814 return -EINVAL;
1815 }
1816 if (!(card->options.sbp.supported_funcs & setcmd))
1817 return -EOPNOTSUPP;
1818 iob = qeth_sbp_build_cmd(card, setcmd, cmdlength);
1819 if (!iob)
1820 return -ENOMEM;
1821
1822 return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL);
1823 }
1824
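/* Invoked for each address entry returned by PNSO when notifications are
 * enabled; re-emits the entry as a registration host event. The VLAN ID is
 * only included when it is a valid VID.
 */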
1825 static void qeth_bridgeport_an_set_cb(void *priv,
1826 struct chsc_pnso_naid_l2 *entry)
1827 {
1828 struct qeth_card *card = (struct qeth_card *)priv;
1829 u8 code;
1830
1831 code = IPA_ADDR_CHANGE_CODE_MACADDR;
1832 if (entry->addr_lnid.lnid < VLAN_N_VID)
1833 code |= IPA_ADDR_CHANGE_CODE_VLANID;
1834 qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
1835 (struct net_if_token *)&entry->nit,
1836 (struct mac_addr_lnid *)&entry->addr_lnid);
1837 }
1838
1839 /**
1840 * qeth_bridgeport_an_set() - Enable or disable bridgeport address notification
1841 * @card: qeth_card structure pointer.
1842 * @enable: 0 - disable, non-zero - enable notifications
1843 *
1844 * Returns negative errno-compatible error indication or 0 on success.
1845 *
1846 * On enable, emits a series of address notification udev events for all
1847 * currently registered hosts.
1848 */
1849 int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
1850 {
1851 int rc;
1852
1853 if (!card->options.sbp.supported_funcs)
1854 return -EOPNOTSUPP;
1855
1856 if (enable) {
1857 qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
1858 qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
1859 rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 1,
1860 qeth_bridgeport_an_set_cb, card);
1861 if (rc)
1862 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1863 } else {
1864 rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 0, NULL, NULL);
1865 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1866 }
1867 return rc;
1868 }
1869
1870 /* VNIC Characteristics support */
1871
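/* VNIC Characteristics (VNICC) such as learning or broadcast RX are queried
 * and toggled via IPA_CMD_VNICC and exposed through sysfs. VNICC usage is
 * mutually exclusive with bridgeport functionality, see
 * qeth_bridgeport_allowed().
 */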
1872 /* handle VNICC IPA command return codes; convert to error codes */
1873 static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc)
1874 {
1875 int rc;
1876
1877 switch (ipa_rc) {
1878 case IPA_RC_SUCCESS:
1879 return ipa_rc;
1880 case IPA_RC_L2_UNSUPPORTED_CMD:
1881 case IPA_RC_NOTSUPP:
1882 rc = -EOPNOTSUPP;
1883 break;
1884 case IPA_RC_VNICC_OOSEQ:
1885 rc = -EALREADY;
1886 break;
1887 case IPA_RC_VNICC_VNICBP:
1888 rc = -EBUSY;
1889 break;
1890 case IPA_RC_L2_ADDR_TABLE_FULL:
1891 rc = -ENOSPC;
1892 break;
1893 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
1894 rc = -EACCES;
1895 break;
1896 default:
1897 rc = -EIO;
1898 }
1899
1900 QETH_CARD_TEXT_(card, 2, "err%04x", ipa_rc);
1901 return rc;
1902 }
1903
1904 /* generic VNICC request call back */
1905 static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
1906 struct qeth_reply *reply,
1907 unsigned long data)
1908 {
1909 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
1910 struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc;
1911 u32 sub_cmd = cmd->data.vnicc.hdr.sub_command;
1912
1913 QETH_CARD_TEXT(card, 2, "vniccrcb");
1914 if (cmd->hdr.return_code)
1915 return qeth_l2_vnicc_makerc(card, cmd->hdr.return_code);
1916 /* return results to caller */
1917 card->options.vnicc.sup_chars = rep->vnicc_cmds.supported;
1918 card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;
1919
1920 if (sub_cmd == IPA_VNICC_QUERY_CMDS)
1921 *(u32 *)reply->param = rep->data.query_cmds.sup_cmds;
1922 else if (sub_cmd == IPA_VNICC_GET_TIMEOUT)
1923 *(u32 *)reply->param = rep->data.getset_timeout.timeout;
1924
1925 return 0;
1926 }
1927
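/* Allocate an IPA_CMD_VNICC command buffer and fill in the common VNICC
 * header; data_length is the size of the sub-command specific payload.
 */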
1928 static struct qeth_cmd_buffer *qeth_l2_vnicc_build_cmd(struct qeth_card *card,
1929 u32 vnicc_cmd,
1930 unsigned int data_length)
1931 {
1932 struct qeth_ipacmd_vnicc_hdr *hdr;
1933 struct qeth_cmd_buffer *iob;
1934
1935 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_VNICC, QETH_PROT_NONE,
1936 data_length +
1937 offsetof(struct qeth_ipacmd_vnicc, data));
1938 if (!iob)
1939 return NULL;
1940
1941 hdr = &__ipa_cmd(iob)->data.vnicc.hdr;
1942 hdr->data_length = sizeof(*hdr) + data_length;
1943 hdr->sub_command = vnicc_cmd;
1944 return iob;
1945 }
1946
1947 /* VNICC query VNIC characteristics request */
1948 static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
1949 {
1950 struct qeth_cmd_buffer *iob;
1951
1952 QETH_CARD_TEXT(card, 2, "vniccqch");
1953 iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CHARS, 0);
1954 if (!iob)
1955 return -ENOMEM;
1956
1957 return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL);
1958 }
1959
1960 /* VNICC query sub commands request */
1961 static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
1962 u32 *sup_cmds)
1963 {
1964 struct qeth_cmd_buffer *iob;
1965
1966 QETH_CARD_TEXT(card, 2, "vniccqcm");
1967 iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CMDS,
1968 VNICC_DATA_SIZEOF(query_cmds));
1969 if (!iob)
1970 return -ENOMEM;
1971
1972 __ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;
1973
1974 return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, sup_cmds);
1975 }
1976
1977 /* VNICC enable/disable characteristic request */
1978 static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char,
1979 u32 cmd)
1980 {
1981 struct qeth_cmd_buffer *iob;
1982
1983 QETH_CARD_TEXT(card, 2, "vniccedc");
1984 iob = qeth_l2_vnicc_build_cmd(card, cmd, VNICC_DATA_SIZEOF(set_char));
1985 if (!iob)
1986 return -ENOMEM;
1987
1988 __ipa_cmd(iob)->data.vnicc.data.set_char.vnic_char = vnic_char;
1989
1990 return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL);
1991 }
1992
1993 /* VNICC get/set timeout for characteristic request */
1994 static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
1995 u32 cmd, u32 *timeout)
1996 {
1997 struct qeth_vnicc_getset_timeout *getset_timeout;
1998 struct qeth_cmd_buffer *iob;
1999
2000 QETH_CARD_TEXT(card, 2, "vniccgst");
2001 iob = qeth_l2_vnicc_build_cmd(card, cmd,
2002 VNICC_DATA_SIZEOF(getset_timeout));
2003 if (!iob)
2004 return -ENOMEM;
2005
2006 getset_timeout = &__ipa_cmd(iob)->data.vnicc.data.getset_timeout;
2007 getset_timeout->vnic_char = vnicc;
2008
2009 if (cmd == IPA_VNICC_SET_TIMEOUT)
2010 getset_timeout->timeout = *timeout;
2011
2012 return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout);
2013 }
2014
2015 /* Recover the user-configured timeout setting: re-apply it to the card if
 * the characteristic supports get/set timeout; otherwise, or on failure,
 * reset it to QETH_VNICC_DEFAULT_TIMEOUT. Returns true if the default had to
 * be used.
 */
2016 static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
2017 u32 *timeout)
2018 {
2019 if (card->options.vnicc.sup_chars & vnicc &&
2020 card->options.vnicc.getset_timeout_sup & vnicc &&
2021 !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
2022 timeout))
2023 return false;
2024 *timeout = QETH_VNICC_DEFAULT_TIMEOUT;
2025 return true;
2026 }
2027
2028 /* set current VNICC flag state; called from sysfs store function */
2029 int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
2030 {
2031 int rc = 0;
2032 u32 cmd;
2033
2034 QETH_CARD_TEXT(card, 2, "vniccsch");
2035
2036 /* check if characteristic and enable/disable are supported */
2037 if (!(card->options.vnicc.sup_chars & vnicc) ||
2038 !(card->options.vnicc.set_char_sup & vnicc))
2039 return -EOPNOTSUPP;
2040
2041 if (qeth_bridgeport_is_in_use(card))
2042 return -EBUSY;
2043
2044 /* set enable/disable command and store wanted characteristic */
2045 if (state) {
2046 cmd = IPA_VNICC_ENABLE;
2047 card->options.vnicc.wanted_chars |= vnicc;
2048 } else {
2049 cmd = IPA_VNICC_DISABLE;
2050 card->options.vnicc.wanted_chars &= ~vnicc;
2051 }
2052
2053 /* do we need to do anything? */
2054 if (card->options.vnicc.cur_chars == card->options.vnicc.wanted_chars)
2055 return rc;
2056
2057 /* if card is not ready, only record the new state locally and stop here */
2058 if (!qeth_card_hw_is_reachable(card)) {
2059 if (state)
2060 card->options.vnicc.cur_chars |= vnicc;
2061 else
2062 card->options.vnicc.cur_chars &= ~vnicc;
2063 return rc;
2064 }
2065
2066 rc = qeth_l2_vnicc_set_char(card, vnicc, cmd);
2067 if (rc)
2068 card->options.vnicc.wanted_chars =
2069 card->options.vnicc.cur_chars;
2070 else {
2071 /* successful online VNICC change; handle special cases */
2072 if (state && vnicc == QETH_VNICC_RX_BCAST)
2073 card->options.vnicc.rx_bcast_enabled = true;
2074 if (!state && vnicc == QETH_VNICC_LEARNING)
2075 qeth_l2_vnicc_recover_timeout(card, vnicc,
2076 &card->options.vnicc.learning_timeout);
2077 }
2078
2079 return rc;
2080 }
2081
2082 /* get current VNICC flag state; called from sysfs show function */
2083 int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state)
2084 {
2085 int rc = 0;
2086
2087 QETH_CARD_TEXT(card, 2, "vniccgch");
2088
2089 /* check if characteristic is supported */
2090 if (!(card->options.vnicc.sup_chars & vnicc))
2091 return -EOPNOTSUPP;
2092
2093 if (qeth_bridgeport_is_in_use(card))
2094 return -EBUSY;
2095
2096 /* if card is ready, query current VNICC state */
2097 if (qeth_card_hw_is_reachable(card))
2098 rc = qeth_l2_vnicc_query_chars(card);
2099
2100 *state = (card->options.vnicc.cur_chars & vnicc) ? true : false;
2101 return rc;
2102 }
2103
2104 /* set VNICC timeout; called from sysfs store function. Currently, only learning
2105 * supports timeout
2106 */
2107 int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout)
2108 {
2109 int rc = 0;
2110
2111 QETH_CARD_TEXT(card, 2, "vniccsto");
2112
2113 /* check if characteristic and set_timeout are supported */
2114 if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
2115 !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
2116 return -EOPNOTSUPP;
2117
2118 if (qeth_bridgeport_is_in_use(card))
2119 return -EBUSY;
2120
2121 /* do we need to do anything? */
2122 if (card->options.vnicc.learning_timeout == timeout)
2123 return rc;
2124
2125 /* if card is not ready, simply store the value internally and return */
2126 if (!qeth_card_hw_is_reachable(card)) {
2127 card->options.vnicc.learning_timeout = timeout;
2128 return rc;
2129 }
2130
2131 /* send timeout value to card; if successful, store value internally */
2132 rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
2133 IPA_VNICC_SET_TIMEOUT, &timeout);
2134 if (!rc)
2135 card->options.vnicc.learning_timeout = timeout;
2136
2137 return rc;
2138 }
2139
2140 /* get current VNICC timeout; called from sysfs show function. Currently, only
2141 * learning supports timeout
2142 */
2143 int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
2144 {
2145 int rc = 0;
2146
2147 QETH_CARD_TEXT(card, 2, "vniccgto");
2148
2149 /* check if characteristic and get_timeout are supported */
2150 if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
2151 !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
2152 return -EOPNOTSUPP;
2153
2154 if (qeth_bridgeport_is_in_use(card))
2155 return -EBUSY;
2156
2157 /* if card is ready, get timeout. Otherwise, just return stored value */
2158 *timeout = card->options.vnicc.learning_timeout;
2159 if (qeth_card_hw_is_reachable(card))
2160 rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
2161 IPA_VNICC_GET_TIMEOUT,
2162 timeout);
2163
2164 return rc;
2165 }
2166
2167 /* check if VNICC is currently enabled */
2168 static bool _qeth_l2_vnicc_is_in_use(struct qeth_card *card)
2169 {
2170 if (!card->options.vnicc.sup_chars)
2171 return false;
2172 /* default values are only OK if rx_bcast was not enabled by user
2173 * or the card is offline.
2174 */
2175 if (card->options.vnicc.cur_chars == QETH_VNICC_DEFAULT) {
2176 if (!card->options.vnicc.rx_bcast_enabled ||
2177 !qeth_card_hw_is_reachable(card))
2178 return false;
2179 }
2180 return true;
2181 }
2182
2183 /**
2184 * qeth_bridgeport_allowed() - are any qeth_bridgeport functions allowed?
2185 * @card: qeth_card structure pointer.
2186 *
2187 * qeth_bridgeport functionality is mutually exclusive with usage of the
2188 * VNIC Characteristics and dev2br address notifications.
2189 */
2190 bool qeth_bridgeport_allowed(struct qeth_card *card)
2191 {
2192 struct qeth_priv *priv = netdev_priv(card->dev);
2193
2194 return (!_qeth_l2_vnicc_is_in_use(card) &&
2195 !(priv->brport_features & BR_LEARNING_SYNC));
2196 }
2197
2198 /* Recover a user-configured characteristic: re-apply the wanted state to the
 * card if enable/disable is supported; otherwise, or on failure, revert the
 * wanted value to its default. Returns true if the default had to be used.
 */
2199 static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
2200 bool enable)
2201 {
2202 u32 cmd = enable ? IPA_VNICC_ENABLE : IPA_VNICC_DISABLE;
2203
2204 if (card->options.vnicc.sup_chars & vnicc &&
2205 card->options.vnicc.set_char_sup & vnicc &&
2206 !qeth_l2_vnicc_set_char(card, vnicc, cmd))
2207 return false;
2208 card->options.vnicc.wanted_chars &= ~vnicc;
2209 card->options.vnicc.wanted_chars |= QETH_VNICC_DEFAULT & vnicc;
2210 return true;
2211 }
2212
2213 /* (re-)initialize VNICC */
2214 static void qeth_l2_vnicc_init(struct qeth_card *card)
2215 {
2216 u32 *timeout = &card->options.vnicc.learning_timeout;
2217 bool enable, error = false;
2218 unsigned int chars_len, i;
2219 unsigned long chars_tmp;
2220 u32 sup_cmds, vnicc;
2221
2222 QETH_CARD_TEXT(card, 2, "vniccini");
2223 /* reset rx_bcast */
2224 card->options.vnicc.rx_bcast_enabled = 0;
2225 /* initial query and storage of VNIC characteristics */
2226 if (qeth_l2_vnicc_query_chars(card)) {
2227 if (card->options.vnicc.wanted_chars != QETH_VNICC_DEFAULT ||
2228 *timeout != QETH_VNICC_DEFAULT_TIMEOUT)
2229 dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
2230 /* fail quietly if user didn't change the default config */
2231 card->options.vnicc.sup_chars = 0;
2232 card->options.vnicc.cur_chars = 0;
2233 card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
2234 return;
2235 }
2236 /* get supported commands for each supported characteristic */
2237 chars_tmp = card->options.vnicc.sup_chars;
2238 chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
2239 for_each_set_bit(i, &chars_tmp, chars_len) {
2240 vnicc = BIT(i);
2241 if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) {
2242 sup_cmds = 0;
2243 error = true;
2244 }
2245 if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) &&
2246 (sup_cmds & IPA_VNICC_GET_TIMEOUT))
2247 card->options.vnicc.getset_timeout_sup |= vnicc;
2248 else
2249 card->options.vnicc.getset_timeout_sup &= ~vnicc;
2250 if ((sup_cmds & IPA_VNICC_ENABLE) &&
2251 (sup_cmds & IPA_VNICC_DISABLE))
2252 card->options.vnicc.set_char_sup |= vnicc;
2253 else
2254 card->options.vnicc.set_char_sup &= ~vnicc;
2255 }
2256 /* enforce assumed default values and recover settings, if changed */
2257 error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
2258 timeout);
2259 /* Change chars, if necessary */
2260 chars_tmp = card->options.vnicc.wanted_chars ^
2261 card->options.vnicc.cur_chars;
2262 chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
2263 for_each_set_bit(i, &chars_tmp, chars_len) {
2264 vnicc = BIT(i);
2265 enable = card->options.vnicc.wanted_chars & vnicc;
2266 error |= qeth_l2_vnicc_recover_char(card, vnicc, enable);
2267 }
2268 if (error)
2269 dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
2270 }
2271
2272 /* configure default values of VNIC characteristics */
2273 static void qeth_l2_vnicc_set_defaults(struct qeth_card *card)
2274 {
2275 /* characteristics values */
2276 card->options.vnicc.sup_chars = QETH_VNICC_ALL;
2277 card->options.vnicc.cur_chars = QETH_VNICC_DEFAULT;
2278 card->options.vnicc.learning_timeout = QETH_VNICC_DEFAULT_TIMEOUT;
2279 /* supported commands */
2280 card->options.vnicc.set_char_sup = QETH_VNICC_ALL;
2281 card->options.vnicc.getset_timeout_sup = QETH_VNICC_LEARNING;
2282 /* settings wanted by users */
2283 card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
2284 }
2285
2286 static const struct device_type qeth_l2_devtype = {
2287 .name = "qeth_layer2",
2288 .groups = qeth_l2_attr_groups,
2289 };
2290
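/* Discipline .setup hook: set VNICC defaults, initialize the bridgeport (SBP)
 * lock, attach the layer-2 sysfs attribute groups (directly, or via the
 * qeth_l2_devtype device type) and prepare the rx_mode worker.
 */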
2291 static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
2292 {
2293 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2294 int rc;
2295
2296 qeth_l2_vnicc_set_defaults(card);
2297 mutex_init(&card->sbp_lock);
2298
2299 if (gdev->dev.type) {
2300 rc = device_add_groups(&gdev->dev, qeth_l2_attr_groups);
2301 if (rc)
2302 return rc;
2303 } else {
2304 gdev->dev.type = &qeth_l2_devtype;
2305 }
2306
2307 INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
2308 return 0;
2309 }
2310
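/* Discipline .remove hook: detach the sysfs groups if they were added
 * directly, stop all worker threads, take the card offline if it is still
 * online, drop the br2dev reference if learning_sync was active and
 * unregister the net_device.
 */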
2311 static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
2312 {
2313 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2314 struct qeth_priv *priv;
2315
2316 if (gdev->dev.type != &qeth_l2_devtype)
2317 device_remove_groups(&gdev->dev, qeth_l2_attr_groups);
2318
2319 qeth_set_allowed_threads(card, 0, 1);
2320 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
2321
2322 if (gdev->state == CCWGROUP_ONLINE)
2323 qeth_set_offline(card, card->discipline, false);
2324
2325 if (card->dev->reg_state == NETREG_REGISTERED) {
2326 priv = netdev_priv(card->dev);
2327 if (priv->brport_features & BR_LEARNING_SYNC) {
2328 rtnl_lock();
2329 qeth_l2_br2dev_put();
2330 rtnl_unlock();
2331 }
2332 unregister_netdev(card->dev);
2333 }
2334 }
2335
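/* Discipline .set_online hook: detect dev2br support, query bridgeport
 * capabilities, register the MAC address and (re-)initialize VNICC; then
 * either register the net_device (first activation) or re-attach it with
 * its previous queue and feature configuration.
 */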
2336 static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
2337 {
2338 struct net_device *dev = card->dev;
2339 int rc = 0;
2340
2341 qeth_l2_detect_dev2br_support(card);
2342
2343 mutex_lock(&card->sbp_lock);
2344 qeth_bridgeport_query_support(card);
2345 if (card->options.sbp.supported_funcs) {
2346 qeth_l2_setup_bridgeport_attrs(card);
2347 dev_info(&card->gdev->dev,
2348 "The device represents a Bridge Capable Port\n");
2349 }
2350 mutex_unlock(&card->sbp_lock);
2351
2352 qeth_l2_register_dev_addr(card);
2353
2354 /* for the rx_bcast characteristic, init VNICC after setmac */
2355 qeth_l2_vnicc_init(card);
2356
2357 qeth_l2_trace_features(card);
2358
2359 /* softsetup */
2360 QETH_CARD_TEXT(card, 2, "softsetp");
2361
2362 card->state = CARD_STATE_SOFTSETUP;
2363
2364 qeth_set_allowed_threads(card, 0xffffffff, 0);
2365
2366 if (dev->reg_state != NETREG_REGISTERED) {
2367 rc = qeth_l2_setup_netdev(card);
2368 if (rc)
2369 goto err_setup;
2370
2371 if (carrier_ok)
2372 netif_carrier_on(dev);
2373 } else {
2374 rtnl_lock();
2375 rc = qeth_set_real_num_tx_queues(card,
2376 qeth_tx_actual_queues(card));
2377 if (rc) {
2378 rtnl_unlock();
2379 goto err_set_queues;
2380 }
2381
2382 if (carrier_ok)
2383 netif_carrier_on(dev);
2384 else
2385 netif_carrier_off(dev);
2386
2387 netif_device_attach(dev);
2388 qeth_enable_hw_features(dev);
2389 qeth_l2_enable_brport_features(card);
2390
2391 if (netif_running(dev)) {
2392 local_bh_disable();
2393 napi_schedule(&card->napi);
2394 /* kick-start the NAPI softirq: */
2395 local_bh_enable();
2396 qeth_l2_set_rx_mode(dev);
2397 }
2398 rtnl_unlock();
2399 }
2400 return 0;
2401
2402 err_set_queues:
2403 err_setup:
2404 qeth_set_allowed_threads(card, 0, 1);
2405 card->state = CARD_STATE_DOWN;
2406 return rc;
2407 }
2408
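/* Discipline .set_offline hook: stop worker threads, drop queued rx_mode
 * updates, disable PNSO mode and flush the learned FDB entries if dev2br
 * learning_sync was active.
 */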
2409 static void qeth_l2_set_offline(struct qeth_card *card)
2410 {
2411 struct qeth_priv *priv = netdev_priv(card->dev);
2412
2413 qeth_set_allowed_threads(card, 0, 1);
2414 qeth_l2_drain_rx_mode_cache(card);
2415
2416 if (card->state == CARD_STATE_SOFTSETUP)
2417 card->state = CARD_STATE_DOWN;
2418
2419 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
2420 if (priv->brport_features & BR_LEARNING_SYNC)
2421 qeth_l2_dev2br_fdb_flush(card);
2422 }
2423
2424 /* Returns zero if the command is successfully "consumed" */
2425 static int qeth_l2_control_event(struct qeth_card *card,
2426 struct qeth_ipa_cmd *cmd)
2427 {
2428 switch (cmd->hdr.command) {
2429 case IPA_CMD_SETBRIDGEPORT_OSA:
2430 case IPA_CMD_SETBRIDGEPORT_IQD:
2431 if (cmd->data.sbp.hdr.command_code ==
2432 IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
2433 qeth_bridge_state_change(card, cmd);
2434 return 0;
2435 }
2436
2437 return 1;
2438 case IPA_CMD_ADDRESS_CHANGE_NOTIF:
2439 qeth_addr_change_event(card, cmd);
2440 return 0;
2441 default:
2442 return 1;
2443 }
2444 }
2445
2446 const struct qeth_discipline qeth_l2_discipline = {
2447 .setup = qeth_l2_probe_device,
2448 .remove = qeth_l2_remove_device,
2449 .set_online = qeth_l2_set_online,
2450 .set_offline = qeth_l2_set_offline,
2451 .control_event_handler = qeth_l2_control_event,
2452 };
2453 EXPORT_SYMBOL_GPL(qeth_l2_discipline);
2454
2455 static int __init qeth_l2_init(void)
2456 {
2457 pr_info("register layer 2 discipline\n");
2458 refcount_set(&qeth_l2_switchdev_notify_refcnt, 0);
2459 return 0;
2460 }
2461
2462 static void __exit qeth_l2_exit(void)
2463 {
2464 pr_info("unregister layer 2 discipline\n");
2465 }
2466
2467 module_init(qeth_l2_init);
2468 module_exit(qeth_l2_exit);
2469 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
2470 MODULE_DESCRIPTION("qeth layer 2 discipline");
2471 MODULE_LICENSE("GPL");
2472