/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/nls.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

#include "fjes.h"
#include "fjes_trace.h"

#define MAJ 1
#define MIN 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
#define DRV_NAME	"fjes"
char fjes_driver_name[] = DRV_NAME;
char fjes_driver_version[] = DRV_VERSION;
static const char fjes_driver_string[] =
		"FUJITSU Extended Socket Network Device Driver";
static const char fjes_copyright[] =
		"Copyright (c) 2015 FUJITSU LIMITED";

MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int fjes_request_irq(struct fjes_adapter *);
static void fjes_free_irq(struct fjes_adapter *);

static int fjes_open(struct net_device *);
static int fjes_close(struct net_device *);
static int fjes_setup_resources(struct fjes_adapter *);
static void fjes_free_resources(struct fjes_adapter *);
static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
static void fjes_raise_intr_rxdata_task(struct work_struct *);
static void fjes_tx_stall_task(struct work_struct *);
static void fjes_force_close_task(struct work_struct *);
static irqreturn_t fjes_intr(int, void*);
static struct rtnl_link_stats64 *
fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
static int fjes_change_mtu(struct net_device *, int);
static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
static void fjes_tx_retry(struct net_device *);

static int fjes_acpi_add(struct acpi_device *);
static int fjes_acpi_remove(struct acpi_device *);
static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*);

static int fjes_probe(struct platform_device *);
static int fjes_remove(struct platform_device *);

static int fjes_sw_init(struct fjes_adapter *);
static void fjes_netdev_setup(struct net_device *);
static void fjes_irq_watch_task(struct work_struct *);
static void fjes_watch_unshare_task(struct work_struct *);
static void fjes_rx_irq(struct fjes_adapter *, int);
static int fjes_poll(struct napi_struct *, int);

static const struct acpi_device_id fjes_acpi_ids[] = {
	{"PNP0C02", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);

static struct acpi_driver fjes_acpi_driver = {
	.name = DRV_NAME,
	.class = DRV_NAME,
	.owner = THIS_MODULE,
	.ids = fjes_acpi_ids,
	.ops = {
		.add = fjes_acpi_add,
		.remove = fjes_acpi_remove,
	},
};

static struct platform_driver fjes_driver = {
	.driver = {
		.name = DRV_NAME,
	},
	.probe = fjes_probe,
	.remove = fjes_remove,
};

static struct resource fjes_resource[] = {
	{
		.flags = IORESOURCE_MEM,
		.start = 0,
		.end = 0,
	},
	{
		.flags = IORESOURCE_IRQ,
		.start = 0,
		.end = 0,
	},
};

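/* fjes_acpi_add - Verify the Extended Socket ACPI _STR symbol and register the platform device */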
static int fjes_acpi_add(struct acpi_device *device)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
	struct platform_device *plat_dev;
	union acpi_object *str;
	acpi_status status;
	int result;

	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	str = buffer.pointer;
	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
				 str->string.length, UTF16_LITTLE_ENDIAN,
				 str_buf, sizeof(str_buf) - 1);
	str_buf[result] = 0;

	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
		kfree(buffer.pointer);
		return -ENODEV;
	}
	kfree(buffer.pointer);

	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     fjes_get_acpi_resource, fjes_resource);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* create platform_device */
	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
						   ARRAY_SIZE(fjes_resource));
	device->driver_data = plat_dev;

	return 0;
}

static int fjes_acpi_remove(struct acpi_device *device)
{
	struct platform_device *plat_dev;

	plat_dev = (struct platform_device *)acpi_driver_data(device);
	platform_device_unregister(plat_dev);

	return 0;
}

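/* fjes_get_acpi_resource - Pick up the device's MMIO range and IRQ from ACPI _CRS */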
static acpi_status
fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
{
	struct acpi_resource_address32 *addr;
	struct acpi_resource_irq *irq;
	struct resource *res = data;

	switch (acpi_res->type) {
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		addr = &acpi_res->data.address32;
		res[0].start = addr->address.minimum;
		res[0].end = addr->address.minimum +
			addr->address.address_length - 1;
		break;

	case ACPI_RESOURCE_TYPE_IRQ:
		irq = &acpi_res->data.irq;
		if (irq->interrupt_count != 1)
			return AE_ERROR;
		res[1].start = irq->interrupts[0];
		res[1].end = irq->interrupts[0];
		break;

	default:
		break;
	}

	return AE_OK;
}

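/* fjes_request_irq - Register the shared interrupt handler and start the IRQ watch task */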
static int fjes_request_irq(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int result = -1;

	adapter->interrupt_watch_enable = true;
	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
		queue_delayed_work(adapter->control_wq,
				   &adapter->interrupt_watch_task,
				   FJES_IRQ_WATCH_DELAY);
	}

	if (!adapter->irq_registered) {
		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
				     IRQF_SHARED, netdev->name, adapter);
		if (result)
			adapter->irq_registered = false;
		else
			adapter->irq_registered = true;
	}

	return result;
}

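/* fjes_free_irq - Stop the IRQ watch task, mask all interrupts and release the IRQ */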
static void fjes_free_irq(struct fjes_adapter *adapter)
{
	struct fjes_hw *hw = &adapter->hw;

	adapter->interrupt_watch_enable = false;
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

	if (adapter->irq_registered) {
		free_irq(adapter->hw.hw_res.irq, adapter);
		adapter->irq_registered = false;
	}
}

static const struct net_device_ops fjes_netdev_ops = {
	.ndo_open		= fjes_open,
	.ndo_stop		= fjes_close,
	.ndo_start_xmit		= fjes_xmit_frame,
	.ndo_get_stats64	= fjes_get_stats64,
	.ndo_change_mtu		= fjes_change_mtu,
	.ndo_tx_timeout		= fjes_tx_retry,
	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fjes_vlan_rx_kill_vid,
};

/* fjes_open - Called when a network interface is made active */
static int fjes_open(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	int result;

	if (adapter->open_guard)
		return -ENXIO;

	result = fjes_setup_resources(adapter);
	if (result)
		goto err_setup_res;

	hw->txrx_stop_req_bit = 0;
	hw->epstop_req_bit = 0;

	napi_enable(&adapter->napi);

	fjes_hw_capture_interrupt_status(hw);

	result = fjes_request_irq(adapter);
	if (result)
		goto err_req_irq;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);

	netif_tx_start_all_queues(netdev);
	netif_carrier_on(netdev);

	return 0;

err_req_irq:
	fjes_free_irq(adapter);
	napi_disable(&adapter->napi);

err_setup_res:
	fjes_free_resources(adapter);
	return result;
}

/* fjes_close - Disables a network interface */
static int fjes_close(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int epidx;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	fjes_hw_raise_epstop(hw);

	napi_disable(&adapter->napi);

	spin_lock_irqsave(&hw->rx_status_lock, flags);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status &=
				~FJES_RX_POLL_WORK;
	}
	spin_unlock_irqrestore(&hw->rx_status_lock, flags);

	fjes_free_irq(adapter);

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	adapter->unshare_watch_bitmask = 0;
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);

	fjes_hw_wait_epstop(hw);

	fjes_free_resources(adapter);

	return 0;
}

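/* fjes_setup_resources - Update EP status/zone info and register shared buffers */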
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int result;
	int epidx;

	mutex_lock(&hw->hw_info.lock);
	result = fjes_hw_request_info(hw);
	switch (result) {
	case 0:
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			hw->ep_shm_info[epidx].es_status =
			    hw->hw_info.res_buf->info.info[epidx].es_status;
			hw->ep_shm_info[epidx].zone =
			    hw->hw_info.res_buf->info.info[epidx].zone;
		}
		break;
	default:
	case -ENOMSG:
	case -EBUSY:
		adapter->force_reset = true;

		mutex_unlock(&hw->hw_info.lock);
		return result;
	}
	mutex_unlock(&hw->hw_info.lock);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if ((epidx != hw->my_epid) &&
		    (hw->ep_shm_info[epidx].es_status ==
		     FJES_ZONING_STATUS_ENABLE)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_INFO_UPDATE);
			hw->ep_shm_info[epidx].ep_stats
				.send_intr_zoneupdate += 1;
		}
	}

	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if (epidx == hw->my_epid)
			continue;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
				    netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
			mutex_lock(&hw->hw_info.lock);
			result =
			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
			mutex_unlock(&hw->hw_info.lock);

			switch (result) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				adapter->force_reset = true;
				return result;
			}

			hw->ep_shm_info[epidx].ep_stats
				.com_regist_buf_exec += 1;
		}
	}

	return 0;
}

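/* fjes_free_resources - Unregister shared buffers and reset the device if needed */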
static void fjes_free_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	bool reset_flag = false;
	unsigned long flags;
	int result;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		mutex_lock(&hw->hw_info.lock);
		result = fjes_hw_unregister_buff_addr(hw, epidx);
		mutex_unlock(&hw->hw_info.lock);

		hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;

		if (result)
			reset_flag = true;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx,
				    netdev->dev_addr, netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		clear_bit(epidx, &hw->txrx_stop_req_bit);
	}

	if (reset_flag || adapter->force_reset) {
		result = fjes_hw_reset(hw);

		adapter->force_reset = false;

		if (result)
			adapter->open_guard = true;

		hw->hw_info.buffer_share_bit = 0;

		memset((void *)&param, 0, sizeof(param));

		param.req_len = hw->hw_info.req_buf_size;
		param.req_start = __pa(hw->hw_info.req_buf);
		param.res_len = hw->hw_info.res_buf_size;
		param.res_start = __pa(hw->hw_info.res_buf);
		param.share_start = __pa(hw->hw_info.share->ep_status);

		fjes_hw_init_command_registers(hw, &param);
	}
}

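/* fjes_tx_stall_task - Wake the stopped TX queue once partner buffers have room again */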
static void fjes_tx_stall_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, tx_stall_task);
	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;
	int all_queue_available, sendable;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;
	union ep_buffer_info *info;
	int i;

	if (((long)jiffies -
		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
		netif_wake_queue(netdev);
		return;
	}

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (i = 0; i < 5; i++) {
		all_queue_available = 1;

		for (epid = 0; epid < max_epid; epid++) {
			if (my_epid == epid)
				continue;

			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
			sendable = (pstatus == EP_PARTNER_SHARED);
			if (!sendable)
				continue;

			info = adapter->hw.ep_shm_info[epid].tx.info;

			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
				return;

			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
					 info->v1i.count_max)) {
				all_queue_available = 0;
				break;
			}
		}

		if (all_queue_available) {
			netif_wake_queue(netdev);
			return;
		}
	}

	usleep_range(50, 100);

	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}

static void fjes_force_close_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, force_close_task);
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	dev_close(netdev);
	rtnl_unlock();
}

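/* fjes_raise_intr_rxdata_task - Notify partner EPs of pending delayed-send frames */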
static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, raise_intr_rxdata_task);
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (epid = 0; epid < max_epid; epid++)
		hw->ep_shm_info[epid].tx_status_work = 0;

	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if (pstatus == EP_PARTNER_SHARED) {
			hw->ep_shm_info[epid].tx_status_work =
				hw->ep_shm_info[epid].tx.info->v1i.tx_status;

			if (hw->ep_shm_info[epid].tx_status_work ==
				FJES_TX_DELAY_SEND_PENDING) {
				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
					FJES_TX_DELAY_SEND_NONE;
			}
		}
	}

	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if ((hw->ep_shm_info[epid].tx_status_work ==
		     FJES_TX_DELAY_SEND_PENDING) &&
		    (pstatus == EP_PARTNER_SHARED) &&
		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
		      FJES_RX_POLL_WORK)) {
			fjes_hw_raise_interrupt(hw, epid,
						REG_ICTL_MASK_RX_DATA);
			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
		}
	}

	usleep_range(500, 1000);
}

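/* fjes_tx_send - Put a frame into the destination EP's shared TX buffer */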
static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
			void *data, size_t len)
{
	int retval;

	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
					   data, len);
	if (retval)
		return retval;

	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
		FJES_TX_DELAY_SEND_PENDING;
	if (!work_pending(&adapter->raise_intr_rxdata_task))
		queue_work(adapter->txrx_wq,
			   &adapter->raise_intr_rxdata_task);

	retval = 0;
	return retval;
}

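/* fjes_xmit_frame - Main transmit entry point */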
static netdev_tx_t
fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	int max_epid, my_epid, dest_epid;
	enum ep_partner_status pstatus;
	struct netdev_queue *cur_queue;
	char shortpkt[VLAN_ETH_HLEN];
	bool is_multi, vlan;
	struct ethhdr *eth;
	u16 queue_no = 0;
	u16 vlan_id = 0;
	netdev_tx_t ret;
	char *data;
	int len;

	ret = NETDEV_TX_OK;
	is_multi = false;
	cur_queue = netdev_get_tx_queue(netdev, queue_no);

	eth = (struct ethhdr *)skb->data;
	my_epid = hw->my_epid;

	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;

	data = skb->data;
	len = skb->len;

	if (is_multicast_ether_addr(eth->h_dest)) {
		dest_epid = 0;
		max_epid = hw->max_epid;
		is_multi = true;
	} else if (is_local_ether_addr(eth->h_dest)) {
		dest_epid = eth->h_dest[ETH_ALEN - 1];
		max_epid = dest_epid + 1;

		if ((eth->h_dest[0] == 0x02) &&
		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
			      eth->h_dest[3] | eth->h_dest[4])) &&
		    (dest_epid < hw->max_epid)) {
			;
		} else {
			dest_epid = 0;
			max_epid = 0;
			ret = NETDEV_TX_OK;

			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	} else {
		dest_epid = 0;
		max_epid = 0;
		ret = NETDEV_TX_OK;

		adapter->stats64.tx_packets += 1;
		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
		adapter->stats64.tx_bytes += len;
		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
	}

	for (; dest_epid < max_epid; dest_epid++) {
		if (my_epid == dest_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
		if (pstatus != EP_PARTNER_SHARED) {
			if (!is_multi)
				hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_not_shared += 1;
			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_epbuf_version(
				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
			/* version is NOT 0 */
			adapter->stats64.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats
						.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_ver_mismatch += 1;

			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_mtu(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				netdev->mtu)) {
			adapter->stats64.tx_dropped += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
			adapter->stats64.tx_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
			hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_buf_size_mismatch += 1;

			ret = NETDEV_TX_OK;
		} else if (vlan &&
			   !fjes_hw_check_vlan_id(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				vlan_id)) {
			hw->ep_shm_info[dest_epid].ep_stats
				.tx_dropped_vlanid_mismatch += 1;
			ret = NETDEV_TX_OK;
		} else {
			if (len < VLAN_ETH_HLEN) {
				memset(shortpkt, 0, VLAN_ETH_HLEN);
				memcpy(shortpkt, skb->data, skb->len);
				len = VLAN_ETH_HLEN;
				data = shortpkt;
			}

			if (adapter->tx_retry_count == 0) {
				adapter->tx_start_jiffies = jiffies;
				adapter->tx_retry_count = 1;
			} else {
				adapter->tx_retry_count++;
			}

			if (fjes_tx_send(adapter, dest_epid, data, len)) {
				if (is_multi) {
					ret = NETDEV_TX_OK;
				} else if (
					   ((long)jiffies -
					    (long)adapter->tx_start_jiffies) >=
					    FJES_TX_RETRY_TIMEOUT) {
					adapter->stats64.tx_fifo_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_fifo_errors += 1;
					adapter->stats64.tx_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_errors += 1;

					ret = NETDEV_TX_OK;
				} else {
					netif_trans_update(netdev);
					hw->ep_shm_info[dest_epid].ep_stats
						.tx_buffer_full += 1;
					netif_tx_stop_queue(cur_queue);

					if (!work_pending(&adapter->tx_stall_task))
						queue_work(adapter->txrx_wq,
							   &adapter->tx_stall_task);

					ret = NETDEV_TX_BUSY;
				}
			} else {
				if (!is_multi) {
					adapter->stats64.tx_packets += 1;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_packets += 1;
					adapter->stats64.tx_bytes += len;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_bytes += len;
				}

				adapter->tx_retry_count = 0;
				ret = NETDEV_TX_OK;
			}
		}
	}

	if (ret == NETDEV_TX_OK) {
		dev_kfree_skb(skb);
		if (is_multi) {
			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	}

	return ret;
}

static void fjes_tx_retry(struct net_device *netdev)
{
	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);

	netif_tx_wake_queue(queue);
}

static struct rtnl_link_stats64 *
fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);

	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));

	return stats;
}

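/* fjes_change_mtu - Switch to a supported MTU and refresh the shared buffers */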
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		msleep(1000);

		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}

static int fjes_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool ret = true;
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;

		if (!fjes_hw_check_vlan_id(
			&adapter->hw.ep_shm_info[epid].tx, vid))
			ret = fjes_hw_set_vlan_id(
				&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return ret ? 0 : -ENOSPC;
}

static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;

		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return 0;
}

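/* fjes_txrx_stop_req_irq - Handle a TX/RX stop request from a partner EP */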
static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
				   int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		break;
	case EP_PARTNER_WAITING:
		if (src_epid < hw->my_epid) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(src_epid, &hw->txrx_stop_req_bit);
			set_bit(src_epid, &adapter->unshare_watch_bitmask);

			if (!work_pending(&adapter->unshare_watch_task))
				queue_work(adapter->control_wq,
					   &adapter->unshare_watch_task);
		}
		break;
	case EP_PARTNER_SHARED:
		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
		    FJES_RX_STOP_REQ_REQUEST) {
			set_bit(src_epid, &hw->epstop_req_bit);
			if (!work_pending(&hw->epstop_task))
				queue_work(adapter->control_wq,
					   &hw->epstop_task);
		}
		break;
	}
	trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
}

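/* fjes_stop_req_irq - Handle a device stop request from a partner EP */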
static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_WAITING:
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		clear_bit(src_epid, &hw->txrx_stop_req_bit);
		/* fall through */
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		set_bit(src_epid, &adapter->unshare_watch_bitmask);
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
		break;
	case EP_PARTNER_SHARED:
		set_bit(src_epid, &hw->epstop_req_bit);

		if (!work_pending(&hw->epstop_task))
			queue_work(adapter->control_wq, &hw->epstop_task);
		break;
	}
	trace_fjes_stop_req_irq_post(hw, src_epid);
}

static void fjes_update_zone_irq(struct fjes_adapter *adapter,
				 int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	if (!work_pending(&hw->update_zone_task))
		queue_work(adapter->control_wq, &hw->update_zone_task);
}

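/* fjes_intr - Interrupt handler */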
static irqreturn_t fjes_intr(int irq, void *data)
{
	struct fjes_adapter *adapter = data;
	struct fjes_hw *hw = &adapter->hw;
	irqreturn_t ret;
	u32 icr;

	icr = fjes_hw_capture_interrupt_status(hw);

	if (icr & REG_IS_MASK_IS_ASSERT) {
		if (icr & REG_ICTL_MASK_RX_DATA) {
			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_rx += 1;
		}

		if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_stop += 1;
		}

		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_unshare += 1;
		}

		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
			fjes_hw_set_irqmask(hw,
					    REG_ICTL_MASK_TXRX_STOP_DONE, true);

		if (icr & REG_ICTL_MASK_INFO_UPDATE) {
			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_zoneupdate += 1;
		}

		ret = IRQ_HANDLED;
	} else {
		ret = IRQ_NONE;
	}

	return ret;
}

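/* fjes_rxframe_search_exist - Find the next partner EP with pending RX data */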
static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
				     int start_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, cur_epid;
	int i;

	max_epid = hw->max_epid;
	start_epid = (start_epid + 1 + max_epid) % max_epid;

	for (i = 0; i < max_epid; i++) {
		cur_epid = (start_epid + i) % max_epid;
		if (cur_epid == hw->my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
		if (pstatus == EP_PARTNER_SHARED) {
			if (!fjes_hw_epbuf_rx_is_empty(
				&hw->ep_shm_info[cur_epid].rx))
				return cur_epid;
		}
	}
	return -1;
}

static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
			      int *cur_epid)
{
	void *frame;

	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
	if (*cur_epid < 0)
		return NULL;

	frame =
	fjes_hw_epbuf_rx_curpkt_get_addr(
		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);

	return frame;
}

static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
{
	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}

static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);

	adapter->unset_rx_last = true;
	napi_schedule(&adapter->napi);
}

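/* fjes_poll - NAPI poll routine */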
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_errors += 1;
			} else {
				memcpy(skb_put(skb, frame_len),
				       frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
								 .multicast += 1;
				}
			}

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete(napi);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_reschedule(napi);
		} else {
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}

/* fjes_probe - Device Initialization Routine */
static int fjes_probe(struct platform_device *plat_dev)
{
	struct fjes_adapter *adapter;
	struct net_device *netdev;
	struct resource *res;
	struct fjes_hw *hw;
	int err;

	err = -ENOMEM;
	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
				 NET_NAME_UNKNOWN, fjes_netdev_setup,
				 FJES_MAX_QUEUES);

	if (!netdev)
		goto err_out;

	SET_NETDEV_DEV(netdev, &plat_dev->dev);

	dev_set_drvdata(&plat_dev->dev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->plat_dev = plat_dev;
	hw = &adapter->hw;
	hw->back = adapter;

	/* setup the private structure */
	err = fjes_sw_init(adapter);
	if (err)
		goto err_free_netdev;

	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
	adapter->force_reset = false;
	adapter->open_guard = false;

	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
					      WQ_MEM_RECLAIM, 0);

	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
	INIT_WORK(&adapter->raise_intr_rxdata_task,
		  fjes_raise_intr_rxdata_task);
	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
	adapter->unshare_watch_bitmask = 0;

	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
	adapter->interrupt_watch_enable = false;

	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	hw->hw_res.start = res->start;
	hw->hw_res.size = resource_size(res);
	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
	err = fjes_hw_init(&adapter->hw);
	if (err)
		goto err_free_netdev;

	/* setup MAC address (02:00:00:00:00:[epid])*/
	netdev->dev_addr[0] = 2;
	netdev->dev_addr[1] = 0;
	netdev->dev_addr[2] = 0;
	netdev->dev_addr[3] = 0;
	netdev->dev_addr[4] = 0;
	netdev->dev_addr[5] = hw->my_epid; /* EPID */

	err = register_netdev(netdev);
	if (err)
		goto err_hw_exit;

	netif_carrier_off(netdev);

	fjes_dbg_adapter_init(adapter);

	return 0;

err_hw_exit:
	fjes_hw_exit(&adapter->hw);
err_free_netdev:
	free_netdev(netdev);
err_out:
	return err;
}

/* fjes_remove - Device Removal Routine */
static int fjes_remove(struct platform_device *plat_dev)
{
	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	fjes_dbg_adapter_exit(adapter);

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);
	if (adapter->control_wq)
		destroy_workqueue(adapter->control_wq);
	if (adapter->txrx_wq)
		destroy_workqueue(adapter->txrx_wq);

	unregister_netdev(netdev);

	fjes_hw_exit(hw);

	netif_napi_del(&adapter->napi);

	free_netdev(netdev);

	return 0;
}

static int fjes_sw_init(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);

	return 0;
}

/* fjes_netdev_setup - netdevice initialization routine */
static void fjes_netdev_setup(struct net_device *netdev)
{
	ether_setup(netdev);

	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
	netdev->netdev_ops = &fjes_netdev_ops;
	fjes_set_ethtool_ops(netdev);
	netdev->mtu = fjes_support_mtu[3];
	netdev->min_mtu = fjes_support_mtu[0];
	netdev->max_mtu = fjes_support_mtu[3];
	netdev->flags |= IFF_BROADCAST;
	netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
}

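/* fjes_irq_watch_task - Poll the interrupt status in case an interrupt was missed */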
static void fjes_irq_watch_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
			struct fjes_adapter, interrupt_watch_task);

	local_irq_disable();
	fjes_intr(adapter->hw.hw_res.irq, adapter);
	local_irq_enable();

	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
		napi_schedule(&adapter->napi);

	if (adapter->interrupt_watch_enable) {
		if (!delayed_work_pending(&adapter->interrupt_watch_task))
			queue_delayed_work(adapter->control_wq,
					   &adapter->interrupt_watch_task,
					   FJES_IRQ_WATCH_DELAY);
	}
}

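/* fjes_watch_unshare_task - Wait for partners to stop and unregister shared buffers */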
static void fjes_watch_unshare_task(struct work_struct *work)
{
	struct fjes_adapter *adapter =
	container_of(work, struct fjes_adapter, unshare_watch_task);

	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;

	int unshare_watch, unshare_reserve;
	int max_epid, my_epid, epidx;
	int stop_req, stop_req_done;
	ulong unshare_watch_bitmask;
	unsigned long flags;
	int wait_time = 0;
	int is_shared;
	int ret;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
	adapter->unshare_watch_bitmask = 0;

	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
	       (wait_time < 3000)) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
							   epidx);

			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);

			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
					FJES_RX_STOP_REQ_DONE;

			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);

			unshare_reserve = test_bit(epidx,
						   &hw->hw_info.buffer_unshare_reserve_bit);

			if ((!stop_req ||
			     (is_shared && (!is_shared || !stop_req_done))) &&
			    (is_shared || !unshare_watch || !unshare_reserve))
				continue;

			mutex_lock(&hw->hw_info.lock);
			ret = fjes_hw_unregister_buff_addr(hw, epidx);
			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(
					&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}
				break;
			}
			mutex_unlock(&hw->hw_info.lock);
			hw->ep_shm_info[epidx].ep_stats
					.com_unregist_buf_exec += 1;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(epidx, &hw->txrx_stop_req_bit);
			clear_bit(epidx, &unshare_watch_bitmask);
			clear_bit(epidx,
				  &hw->hw_info.buffer_unshare_reserve_bit);
		}

		msleep(100);
		wait_time += 100;
	}

	if (hw->hw_info.buffer_unshare_reserve_bit) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			if (test_bit(epidx,
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				mutex_lock(&hw->hw_info.lock);

				ret = fjes_hw_unregister_buff_addr(hw, epidx);
				switch (ret) {
				case 0:
					break;
				case -ENOMSG:
				case -EBUSY:
				default:
					if (!work_pending(
						&adapter->force_close_task)) {
						adapter->force_reset = true;
						schedule_work(
							&adapter->force_close_task);
					}
					break;
				}
				mutex_unlock(&hw->hw_info.lock);

				hw->ep_shm_info[epidx].ep_stats
					.com_unregist_buf_exec += 1;

				spin_lock_irqsave(&hw->rx_status_lock, flags);
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);

				clear_bit(epidx, &hw->txrx_stop_req_bit);
				clear_bit(epidx, &unshare_watch_bitmask);
				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
			}

			if (test_bit(epidx, &unshare_watch_bitmask)) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
						~FJES_RX_STOP_REQ_DONE;
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);
			}
		}
	}
}

/* fjes_init_module - Driver Registration Routine */
static int __init fjes_init_module(void)
{
	int result;

	pr_info("%s - version %s - %s\n",
		fjes_driver_string, fjes_driver_version, fjes_copyright);

	fjes_dbg_init();

	result = platform_driver_register(&fjes_driver);
	if (result < 0) {
		fjes_dbg_exit();
		return result;
	}

	result = acpi_bus_register_driver(&fjes_acpi_driver);
	if (result < 0)
		goto fail_acpi_driver;

	return 0;

fail_acpi_driver:
	platform_driver_unregister(&fjes_driver);
	fjes_dbg_exit();
	return result;
}

module_init(fjes_init_module);

/* fjes_exit_module - Driver Exit Cleanup Routine */
static void __exit fjes_exit_module(void)
{
	acpi_bus_unregister_driver(&fjes_acpi_driver);
	platform_driver_unregister(&fjes_driver);
	fjes_dbg_exit();
}

module_exit(fjes_exit_module);
1518