xref: /linux/drivers/net/fjes/fjes_main.c (revision 75a6faf617d107bdbc74d36ccf89f2280b96ac26)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  FUJITSU Extended Socket Network Device driver
4  *  Copyright (c) 2015 FUJITSU LIMITED
5  */
6 
7 #include <linux/module.h>
8 #include <linux/types.h>
9 #include <linux/nls.h>
10 #include <linux/platform_device.h>
11 #include <linux/netdevice.h>
12 #include <linux/interrupt.h>
13 
14 #include "fjes.h"
15 #include "fjes_trace.h"
16 
17 #define MAJ 1
18 #define MIN 2
19 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
20 #define DRV_NAME	"fjes"
21 char fjes_driver_name[] = DRV_NAME;
22 char fjes_driver_version[] = DRV_VERSION;
23 static const char fjes_driver_string[] =
24 		"FUJITSU Extended Socket Network Device Driver";
25 static const char fjes_copyright[] =
26 		"Copyright (c) 2015 FUJITSU LIMITED";
27 
28 MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
29 MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_VERSION);
32 
33 #define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
34 
35 static int fjes_request_irq(struct fjes_adapter *);
36 static void fjes_free_irq(struct fjes_adapter *);
37 
38 static int fjes_open(struct net_device *);
39 static int fjes_close(struct net_device *);
40 static int fjes_setup_resources(struct fjes_adapter *);
41 static void fjes_free_resources(struct fjes_adapter *);
42 static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
43 static void fjes_raise_intr_rxdata_task(struct work_struct *);
44 static void fjes_tx_stall_task(struct work_struct *);
45 static void fjes_force_close_task(struct work_struct *);
46 static irqreturn_t fjes_intr(int, void*);
47 static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
48 static int fjes_change_mtu(struct net_device *, int);
49 static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
50 static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
51 static void fjes_tx_retry(struct net_device *);
52 
53 static int fjes_acpi_add(struct acpi_device *);
54 static int fjes_acpi_remove(struct acpi_device *);
55 static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*);
56 
57 static int fjes_probe(struct platform_device *);
58 static int fjes_remove(struct platform_device *);
59 
60 static int fjes_sw_init(struct fjes_adapter *);
61 static void fjes_netdev_setup(struct net_device *);
62 static void fjes_irq_watch_task(struct work_struct *);
63 static void fjes_watch_unshare_task(struct work_struct *);
64 static void fjes_rx_irq(struct fjes_adapter *, int);
65 static int fjes_poll(struct napi_struct *, int);
66 
/* ACPI IDs this driver binds to: the Extended Socket device is exposed
 * as a motherboard resource (PNP0C02) and further identified by _STR.
 */
static const struct acpi_device_id fjes_acpi_ids[] = {
	{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
	{"", 0},	/* sentinel */
};
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
72 
/* ACPI driver: probes the PNP0C02 node and, when it is the Extended
 * Socket device, creates the platform device fjes_driver binds to.
 */
static struct acpi_driver fjes_acpi_driver = {
	.name = DRV_NAME,
	.class = DRV_NAME,
	.owner = THIS_MODULE,
	.ids = fjes_acpi_ids,
	.ops = {
		.add = fjes_acpi_add,
		.remove = fjes_acpi_remove,
	},
};
83 
/* Platform driver for the device registered by fjes_acpi_add(). */
static struct platform_driver fjes_driver = {
	.driver = {
		.name = DRV_NAME,
	},
	.probe = fjes_probe,
	.remove = fjes_remove,
};
91 
/* MMIO and IRQ resources for the platform device.  The zeroed ranges
 * are filled in from the ACPI _CRS data by fjes_get_acpi_resource()
 * before the platform device is registered.
 */
static struct resource fjes_resource[] = {
	{
		.flags = IORESOURCE_MEM,
		.start = 0,
		.end = 0,
	},
	{
		.flags = IORESOURCE_IRQ,
		.start = 0,
		.end = 0,
	},
};
104 
105 static bool is_extended_socket_device(struct acpi_device *device)
106 {
107 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
108 	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
109 	union acpi_object *str;
110 	acpi_status status;
111 	int result;
112 
113 	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
114 	if (ACPI_FAILURE(status))
115 		return false;
116 
117 	str = buffer.pointer;
118 	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
119 				 str->string.length, UTF16_LITTLE_ENDIAN,
120 				 str_buf, sizeof(str_buf) - 1);
121 	str_buf[result] = 0;
122 
123 	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
124 		kfree(buffer.pointer);
125 		return false;
126 	}
127 	kfree(buffer.pointer);
128 
129 	return true;
130 }
131 
132 static int acpi_check_extended_socket_status(struct acpi_device *device)
133 {
134 	unsigned long long sta;
135 	acpi_status status;
136 
137 	status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
138 	if (ACPI_FAILURE(status))
139 		return -ENODEV;
140 
141 	if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
142 	      (sta & ACPI_STA_DEVICE_ENABLED) &&
143 	      (sta & ACPI_STA_DEVICE_UI) &&
144 	      (sta & ACPI_STA_DEVICE_FUNCTIONING)))
145 		return -ENODEV;
146 
147 	return 0;
148 }
149 
150 static int fjes_acpi_add(struct acpi_device *device)
151 {
152 	struct platform_device *plat_dev;
153 	acpi_status status;
154 
155 	if (!is_extended_socket_device(device))
156 		return -ENODEV;
157 
158 	if (acpi_check_extended_socket_status(device))
159 		return -ENODEV;
160 
161 	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
162 				     fjes_get_acpi_resource, fjes_resource);
163 	if (ACPI_FAILURE(status))
164 		return -ENODEV;
165 
166 	/* create platform_device */
167 	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
168 						   ARRAY_SIZE(fjes_resource));
169 	device->driver_data = plat_dev;
170 
171 	return 0;
172 }
173 
/* fjes_acpi_remove - ACPI .remove callback; tears down the platform
 * device created by fjes_acpi_add().
 */
static int fjes_acpi_remove(struct acpi_device *device)
{
	/* acpi_driver_data() already yields the stored pointer;
	 * no explicit cast is needed in C
	 */
	platform_device_unregister(acpi_driver_data(device));

	return 0;
}
183 
184 static acpi_status
185 fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
186 {
187 	struct acpi_resource_address32 *addr;
188 	struct acpi_resource_irq *irq;
189 	struct resource *res = data;
190 
191 	switch (acpi_res->type) {
192 	case ACPI_RESOURCE_TYPE_ADDRESS32:
193 		addr = &acpi_res->data.address32;
194 		res[0].start = addr->address.minimum;
195 		res[0].end = addr->address.minimum +
196 			addr->address.address_length - 1;
197 		break;
198 
199 	case ACPI_RESOURCE_TYPE_IRQ:
200 		irq = &acpi_res->data.irq;
201 		if (irq->interrupt_count != 1)
202 			return AE_ERROR;
203 		res[1].start = irq->interrupts[0];
204 		res[1].end = irq->interrupts[0];
205 		break;
206 
207 	default:
208 		break;
209 	}
210 
211 	return AE_OK;
212 }
213 
214 static int fjes_request_irq(struct fjes_adapter *adapter)
215 {
216 	struct net_device *netdev = adapter->netdev;
217 	int result = -1;
218 
219 	adapter->interrupt_watch_enable = true;
220 	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
221 		queue_delayed_work(adapter->control_wq,
222 				   &adapter->interrupt_watch_task,
223 				   FJES_IRQ_WATCH_DELAY);
224 	}
225 
226 	if (!adapter->irq_registered) {
227 		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
228 				     IRQF_SHARED, netdev->name, adapter);
229 		if (result)
230 			adapter->irq_registered = false;
231 		else
232 			adapter->irq_registered = true;
233 	}
234 
235 	return result;
236 }
237 
238 static void fjes_free_irq(struct fjes_adapter *adapter)
239 {
240 	struct fjes_hw *hw = &adapter->hw;
241 
242 	adapter->interrupt_watch_enable = false;
243 	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
244 
245 	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
246 
247 	if (adapter->irq_registered) {
248 		free_irq(adapter->hw.hw_res.irq, adapter);
249 		adapter->irq_registered = false;
250 	}
251 }
252 
253 static const struct net_device_ops fjes_netdev_ops = {
254 	.ndo_open		= fjes_open,
255 	.ndo_stop		= fjes_close,
256 	.ndo_start_xmit		= fjes_xmit_frame,
257 	.ndo_get_stats64	= fjes_get_stats64,
258 	.ndo_change_mtu		= fjes_change_mtu,
259 	.ndo_tx_timeout		= fjes_tx_retry,
260 	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
261 	.ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
262 };
263 
/* fjes_open - Called when a network interface is made active.
 * Sets up shared buffers, enables NAPI, registers the IRQ and unmasks
 * interrupts before starting the TX queues.  Returns 0 or a negative
 * errno; on failure everything acquired so far is released again.
 */
static int fjes_open(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	int result;

	/* set by fjes_free_resources() after a failed HW reset */
	if (adapter->open_guard)
		return -ENXIO;

	result = fjes_setup_resources(adapter);
	if (result)
		goto err_setup_res;

	hw->txrx_stop_req_bit = 0;
	hw->epstop_req_bit = 0;

	napi_enable(&adapter->napi);

	/* discard interrupt status latched while the device was down */
	fjes_hw_capture_interrupt_status(hw);

	result = fjes_request_irq(adapter);
	if (result)
		goto err_req_irq;

	/* unmask only after the handler is installed */
	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);

	netif_tx_start_all_queues(netdev);
	netif_carrier_on(netdev);

	return 0;

err_req_irq:
	fjes_free_irq(adapter);
	napi_disable(&adapter->napi);

err_setup_res:
	fjes_free_resources(adapter);
	return result;
}
304 
/* fjes_close - Disables a network interface.
 * Stops TX, requests endpoint stop from the HW, clears the RX-polling
 * flag towards shared partners, releases the IRQ, flushes all deferred
 * work and finally unregisters the shared buffers.  Always returns 0.
 */
static int fjes_close(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int epidx;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	fjes_hw_raise_epstop(hw);

	napi_disable(&adapter->napi);

	/* tell shared partners we are no longer polling for RX data */
	spin_lock_irqsave(&hw->rx_status_lock, flags);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status &=
				~FJES_RX_POLL_WORK;
	}
	spin_unlock_irqrestore(&hw->rx_status_lock, flags);

	fjes_free_irq(adapter);

	/* flush every outstanding work item before freeing resources */
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	adapter->unshare_watch_bitmask = 0;
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);

	fjes_hw_wait_epstop(hw);

	fjes_free_resources(adapter);

	return 0;
}
350 
/* fjes_setup_resources - negotiate shared buffers with partner EPs.
 * Pulls zoning info from the device, notifies enabled peers of the
 * update, then registers this adapter's buffer pair with every
 * same-zone partner.  Returns 0 or a negative errno; any command
 * failure sets force_reset so the next fjes_free_resources() resets
 * the hardware.
 */
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int result;
	int epidx;

	mutex_lock(&hw->hw_info.lock);
	result = fjes_hw_request_info(hw);
	switch (result) {
	case 0:
		/* cache each endpoint's zoning status and zone id */
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			hw->ep_shm_info[epidx].es_status =
			    hw->hw_info.res_buf->info.info[epidx].es_status;
			hw->ep_shm_info[epidx].zone =
			    hw->hw_info.res_buf->info.info[epidx].zone;
		}
		break;
	/* any non-zero result (listed or not) aborts the setup */
	default:
	case -ENOMSG:
	case -EBUSY:
		adapter->force_reset = true;

		mutex_unlock(&hw->hw_info.lock);
		return result;
	}
	mutex_unlock(&hw->hw_info.lock);

	/* tell every enabled peer that our zoning info changed */
	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if ((epidx != hw->my_epid) &&
		    (hw->ep_shm_info[epidx].es_status ==
		     FJES_ZONING_STATUS_ENABLE)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_INFO_UPDATE);
			hw->ep_shm_info[epidx].ep_stats
				.send_intr_zoneupdate += 1;
		}
	}

	/* give peers time to process the update before registering */
	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if (epidx == hw->my_epid)
			continue;

		buf_pair = &hw->ep_shm_info[epidx];

		/* (re)initialize our TX buffer header for this partner */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
				    netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
			mutex_lock(&hw->hw_info.lock);
			result =
			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
			mutex_unlock(&hw->hw_info.lock);

			switch (result) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				adapter->force_reset = true;
				return result;
			}

			hw->ep_shm_info[epidx].ep_stats
				.com_regist_buf_exec += 1;
		}
	}

	return 0;
}
428 
/* fjes_free_resources - unregister shared buffers from all partners.
 * If any unregister command fails, or a reset was requested earlier via
 * force_reset, the hardware is reset and the command registers are
 * re-initialized.  A failed reset arms open_guard so fjes_open()
 * refuses to bring the interface back up.
 */
static void fjes_free_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	bool reset_flag = false;
	unsigned long flags;
	int result;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		mutex_lock(&hw->hw_info.lock);
		result = fjes_hw_unregister_buff_addr(hw, epidx);
		mutex_unlock(&hw->hw_info.lock);

		hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;

		/* remember the failure; finish the loop regardless */
		if (result)
			reset_flag = true;

		buf_pair = &hw->ep_shm_info[epidx];

		/* reinitialize the TX buffer header for the next open */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx,
				    netdev->dev_addr, netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		clear_bit(epidx, &hw->txrx_stop_req_bit);
	}

	if (reset_flag || adapter->force_reset) {
		result = fjes_hw_reset(hw);

		adapter->force_reset = false;

		if (result)
			adapter->open_guard = true;

		hw->hw_info.buffer_share_bit = 0;

		/* re-program the command registers with the physical
		 * addresses of the request/response/share buffers
		 */
		memset((void *)&param, 0, sizeof(param));

		param.req_len = hw->hw_info.req_buf_size;
		param.req_start = __pa(hw->hw_info.req_buf);
		param.res_len = hw->hw_info.res_buf_size;
		param.res_start = __pa(hw->hw_info.res_buf);
		param.share_start = __pa(hw->hw_info.share->ep_status);

		fjes_hw_init_command_registers(hw, &param);
	}
}
484 
/* fjes_tx_stall_task - deferred work deciding when to restart TX.
 * Scheduled from fjes_xmit_frame() after the queue was stopped because
 * a partner's RX ring was full.  Wakes the queue once every sendable
 * partner has ring space again, or unconditionally after the stall has
 * exceeded FJES_TX_TX_STALL_TIMEOUT; otherwise requeues itself.
 */
static void fjes_tx_stall_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, tx_stall_task);
	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;
	int all_queue_available, sendable;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;
	union ep_buffer_info *info;
	int i;

	/* stalled too long: stop waiting and wake the queue anyway */
	if (((long)jiffies -
		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
		netif_wake_queue(netdev);
		return;
	}

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	/* poll a few times for all shared partners to drain their rings */
	for (i = 0; i < 5; i++) {
		all_queue_available = 1;

		for (epid = 0; epid < max_epid; epid++) {
			if (my_epid == epid)
				continue;

			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
			sendable = (pstatus == EP_PARTNER_SHARED);
			if (!sendable)
				continue;

			info = adapter->hw.ep_shm_info[epid].tx.info;

			/* bail out while a partner's MTU change is still
			 * in progress; the MTU path restarts the queues
			 */
			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
				return;

			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
					 info->v1i.count_max)) {
				all_queue_available = 0;
				break;
			}
		}

		if (all_queue_available) {
			netif_wake_queue(netdev);
			return;
		}
	}

	usleep_range(50, 100);

	/* some ring is still full: check again from the workqueue */
	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}
540 
541 static void fjes_force_close_task(struct work_struct *work)
542 {
543 	struct fjes_adapter *adapter = container_of(work,
544 			struct fjes_adapter, force_close_task);
545 	struct net_device *netdev = adapter->netdev;
546 
547 	rtnl_lock();
548 	dev_close(netdev);
549 	rtnl_unlock();
550 }
551 
/* fjes_raise_intr_rxdata_task - notify partners of pending TX data.
 * Pass 1 snapshots each shared partner's tx_status and clears a pending
 * delayed-send flag in shared memory; pass 2 raises an RX-data
 * interrupt towards every partner that had data pending and is not
 * already polling (FJES_RX_POLL_WORK clear).
 */
static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, raise_intr_rxdata_task);
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (epid = 0; epid < max_epid; epid++)
		hw->ep_shm_info[epid].tx_status_work = 0;

	/* pass 1: snapshot tx_status, clear SEND_PENDING in shared mem */
	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if (pstatus == EP_PARTNER_SHARED) {
			hw->ep_shm_info[epid].tx_status_work =
				hw->ep_shm_info[epid].tx.info->v1i.tx_status;

			if (hw->ep_shm_info[epid].tx_status_work ==
				FJES_TX_DELAY_SEND_PENDING) {
				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
					FJES_TX_DELAY_SEND_NONE;
			}
		}
	}

	/* pass 2: interrupt partners with pending data that aren't polling */
	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if ((hw->ep_shm_info[epid].tx_status_work ==
		     FJES_TX_DELAY_SEND_PENDING) &&
		    (pstatus == EP_PARTNER_SHARED) &&
		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
		      FJES_RX_POLL_WORK)) {
			fjes_hw_raise_interrupt(hw, epid,
						REG_ICTL_MASK_RX_DATA);
			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
		}
	}

	/* throttle how often this notification work can run */
	usleep_range(500, 1000);
}
601 
602 static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
603 			void *data, size_t len)
604 {
605 	int retval;
606 
607 	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
608 					   data, len);
609 	if (retval)
610 		return retval;
611 
612 	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
613 		FJES_TX_DELAY_SEND_PENDING;
614 	if (!work_pending(&adapter->raise_intr_rxdata_task))
615 		queue_work(adapter->txrx_wq,
616 			   &adapter->raise_intr_rxdata_task);
617 
618 	retval = 0;
619 	return retval;
620 }
621 
622 static netdev_tx_t
623 fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
624 {
625 	struct fjes_adapter *adapter = netdev_priv(netdev);
626 	struct fjes_hw *hw = &adapter->hw;
627 
628 	int max_epid, my_epid, dest_epid;
629 	enum ep_partner_status pstatus;
630 	struct netdev_queue *cur_queue;
631 	char shortpkt[VLAN_ETH_HLEN];
632 	bool is_multi, vlan;
633 	struct ethhdr *eth;
634 	u16 queue_no = 0;
635 	u16 vlan_id = 0;
636 	netdev_tx_t ret;
637 	char *data;
638 	int len;
639 
640 	ret = NETDEV_TX_OK;
641 	is_multi = false;
642 	cur_queue = netdev_get_tx_queue(netdev, queue_no);
643 
644 	eth = (struct ethhdr *)skb->data;
645 	my_epid = hw->my_epid;
646 
647 	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;
648 
649 	data = skb->data;
650 	len = skb->len;
651 
652 	if (is_multicast_ether_addr(eth->h_dest)) {
653 		dest_epid = 0;
654 		max_epid = hw->max_epid;
655 		is_multi = true;
656 	} else if (is_local_ether_addr(eth->h_dest)) {
657 		dest_epid = eth->h_dest[ETH_ALEN - 1];
658 		max_epid = dest_epid + 1;
659 
660 		if ((eth->h_dest[0] == 0x02) &&
661 		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
662 			      eth->h_dest[3] | eth->h_dest[4])) &&
663 		    (dest_epid < hw->max_epid)) {
664 			;
665 		} else {
666 			dest_epid = 0;
667 			max_epid = 0;
668 			ret = NETDEV_TX_OK;
669 
670 			adapter->stats64.tx_packets += 1;
671 			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
672 			adapter->stats64.tx_bytes += len;
673 			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
674 		}
675 	} else {
676 		dest_epid = 0;
677 		max_epid = 0;
678 		ret = NETDEV_TX_OK;
679 
680 		adapter->stats64.tx_packets += 1;
681 		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
682 		adapter->stats64.tx_bytes += len;
683 		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
684 	}
685 
686 	for (; dest_epid < max_epid; dest_epid++) {
687 		if (my_epid == dest_epid)
688 			continue;
689 
690 		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
691 		if (pstatus != EP_PARTNER_SHARED) {
692 			if (!is_multi)
693 				hw->ep_shm_info[dest_epid].ep_stats
694 					.tx_dropped_not_shared += 1;
695 			ret = NETDEV_TX_OK;
696 		} else if (!fjes_hw_check_epbuf_version(
697 				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
698 			/* version is NOT 0 */
699 			adapter->stats64.tx_carrier_errors += 1;
700 			hw->ep_shm_info[dest_epid].net_stats
701 						.tx_carrier_errors += 1;
702 			hw->ep_shm_info[dest_epid].ep_stats
703 					.tx_dropped_ver_mismatch += 1;
704 
705 			ret = NETDEV_TX_OK;
706 		} else if (!fjes_hw_check_mtu(
707 				&adapter->hw.ep_shm_info[dest_epid].rx,
708 				netdev->mtu)) {
709 			adapter->stats64.tx_dropped += 1;
710 			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
711 			adapter->stats64.tx_errors += 1;
712 			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
713 			hw->ep_shm_info[dest_epid].ep_stats
714 					.tx_dropped_buf_size_mismatch += 1;
715 
716 			ret = NETDEV_TX_OK;
717 		} else if (vlan &&
718 			   !fjes_hw_check_vlan_id(
719 				&adapter->hw.ep_shm_info[dest_epid].rx,
720 				vlan_id)) {
721 			hw->ep_shm_info[dest_epid].ep_stats
722 				.tx_dropped_vlanid_mismatch += 1;
723 			ret = NETDEV_TX_OK;
724 		} else {
725 			if (len < VLAN_ETH_HLEN) {
726 				memset(shortpkt, 0, VLAN_ETH_HLEN);
727 				memcpy(shortpkt, skb->data, skb->len);
728 				len = VLAN_ETH_HLEN;
729 				data = shortpkt;
730 			}
731 
732 			if (adapter->tx_retry_count == 0) {
733 				adapter->tx_start_jiffies = jiffies;
734 				adapter->tx_retry_count = 1;
735 			} else {
736 				adapter->tx_retry_count++;
737 			}
738 
739 			if (fjes_tx_send(adapter, dest_epid, data, len)) {
740 				if (is_multi) {
741 					ret = NETDEV_TX_OK;
742 				} else if (
743 					   ((long)jiffies -
744 					    (long)adapter->tx_start_jiffies) >=
745 					    FJES_TX_RETRY_TIMEOUT) {
746 					adapter->stats64.tx_fifo_errors += 1;
747 					hw->ep_shm_info[dest_epid].net_stats
748 								.tx_fifo_errors += 1;
749 					adapter->stats64.tx_errors += 1;
750 					hw->ep_shm_info[dest_epid].net_stats
751 								.tx_errors += 1;
752 
753 					ret = NETDEV_TX_OK;
754 				} else {
755 					netif_trans_update(netdev);
756 					hw->ep_shm_info[dest_epid].ep_stats
757 						.tx_buffer_full += 1;
758 					netif_tx_stop_queue(cur_queue);
759 
760 					if (!work_pending(&adapter->tx_stall_task))
761 						queue_work(adapter->txrx_wq,
762 							   &adapter->tx_stall_task);
763 
764 					ret = NETDEV_TX_BUSY;
765 				}
766 			} else {
767 				if (!is_multi) {
768 					adapter->stats64.tx_packets += 1;
769 					hw->ep_shm_info[dest_epid].net_stats
770 								.tx_packets += 1;
771 					adapter->stats64.tx_bytes += len;
772 					hw->ep_shm_info[dest_epid].net_stats
773 								.tx_bytes += len;
774 				}
775 
776 				adapter->tx_retry_count = 0;
777 				ret = NETDEV_TX_OK;
778 			}
779 		}
780 	}
781 
782 	if (ret == NETDEV_TX_OK) {
783 		dev_kfree_skb(skb);
784 		if (is_multi) {
785 			adapter->stats64.tx_packets += 1;
786 			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
787 			adapter->stats64.tx_bytes += 1;
788 			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
789 		}
790 	}
791 
792 	return ret;
793 }
794 
/* fjes_tx_retry - .ndo_tx_timeout: kick TX queue 0 so that stalled
 * transmissions are retried.
 */
static void fjes_tx_retry(struct net_device *netdev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(netdev, 0));
}
801 
802 static void
803 fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
804 {
805 	struct fjes_adapter *adapter = netdev_priv(netdev);
806 
807 	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
808 }
809 
/* fjes_change_mtu - .ndo_change_mtu: switch to the nearest supported
 * MTU.  While the interface is running, partners are told the MTU
 * change is in progress, traffic is quiesced, the TX buffer headers are
 * rebuilt for the new size and partners are signalled completion.
 * Returns 0 on success, -EINVAL for an unsupported MTU.
 */
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	/* round the requested MTU up to the nearest supported size */
	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		/* announce "MTU change in progress" to all partners */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		/* let in-flight traffic drain before resizing buffers */
		msleep(1000);

		/* NOTE(review): queues were already stopped above; this
		 * second stop looks redundant — confirm before removing
		 */
		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			/* rebuild the TX buffer header for the new MTU and
			 * signal the partner that the change completed
			 */
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}
878 
879 static int fjes_vlan_rx_add_vid(struct net_device *netdev,
880 				__be16 proto, u16 vid)
881 {
882 	struct fjes_adapter *adapter = netdev_priv(netdev);
883 	bool ret = true;
884 	int epid;
885 
886 	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
887 		if (epid == adapter->hw.my_epid)
888 			continue;
889 
890 		if (!fjes_hw_check_vlan_id(
891 			&adapter->hw.ep_shm_info[epid].tx, vid))
892 			ret = fjes_hw_set_vlan_id(
893 				&adapter->hw.ep_shm_info[epid].tx, vid);
894 	}
895 
896 	return ret ? 0 : -ENOSPC;
897 }
898 
899 static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
900 				 __be16 proto, u16 vid)
901 {
902 	struct fjes_adapter *adapter = netdev_priv(netdev);
903 	int epid;
904 
905 	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
906 		if (epid == adapter->hw.my_epid)
907 			continue;
908 
909 		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
910 	}
911 
912 	return 0;
913 }
914 
/* fjes_txrx_stop_req_irq - handle a TXRX-stop request from @src_epid.
 * Reaction depends on the partner state: a WAITING partner whose epid
 * is lower than ours is acknowledged (STOP_REQ_DONE) and queued for
 * unshare watching; a SHARED partner that also raised a stop request
 * gets the epstop task scheduled.  Other states need no action.
 */
static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
				   int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		break;
	case EP_PARTNER_WAITING:
		if (src_epid < hw->my_epid) {
			/* acknowledge the stop request in shared memory */
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(src_epid, &hw->txrx_stop_req_bit);
			set_bit(src_epid, &adapter->unshare_watch_bitmask);

			if (!work_pending(&adapter->unshare_watch_task))
				queue_work(adapter->control_wq,
					   &adapter->unshare_watch_task);
		}
		break;
	case EP_PARTNER_SHARED:
		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
		    FJES_RX_STOP_REQ_REQUEST) {
			set_bit(src_epid, &hw->epstop_req_bit);
			if (!work_pending(&hw->epstop_task))
				queue_work(adapter->control_wq,
					   &hw->epstop_task);
		}
		break;
	}
	trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
}
956 
/* fjes_stop_req_irq - handle a device-stop request from @src_epid.
 * Reserves the endpoint for buffer unshare, then: a WAITING partner is
 * acknowledged and (falling through) scheduled for unshare watching
 * like the UNSHARE/COMPLETE states; a SHARED partner gets the epstop
 * task scheduled instead.
 */
static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_WAITING:
		/* acknowledge the stop request before scheduling the
		 * unshare watcher below
		 */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		clear_bit(src_epid, &hw->txrx_stop_req_bit);
		/* fall through */
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		set_bit(src_epid, &adapter->unshare_watch_bitmask);
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
		break;
	case EP_PARTNER_SHARED:
		set_bit(src_epid, &hw->epstop_req_bit);

		if (!work_pending(&hw->epstop_task))
			queue_work(adapter->control_wq, &hw->epstop_task);
		break;
	}
	trace_fjes_stop_req_irq_post(hw, src_epid);
}
992 
993 static void fjes_update_zone_irq(struct fjes_adapter *adapter,
994 				 int src_epid)
995 {
996 	struct fjes_hw *hw = &adapter->hw;
997 
998 	if (!work_pending(&hw->update_zone_task))
999 		queue_work(adapter->control_wq, &hw->update_zone_task);
1000 }
1001 
1002 static irqreturn_t fjes_intr(int irq, void *data)
1003 {
1004 	struct fjes_adapter *adapter = data;
1005 	struct fjes_hw *hw = &adapter->hw;
1006 	irqreturn_t ret;
1007 	u32 icr;
1008 
1009 	icr = fjes_hw_capture_interrupt_status(hw);
1010 
1011 	if (icr & REG_IS_MASK_IS_ASSERT) {
1012 		if (icr & REG_ICTL_MASK_RX_DATA) {
1013 			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
1014 			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1015 				.recv_intr_rx += 1;
1016 		}
1017 
1018 		if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
1019 			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
1020 			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1021 				.recv_intr_stop += 1;
1022 		}
1023 
1024 		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
1025 			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
1026 			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1027 				.recv_intr_unshare += 1;
1028 		}
1029 
1030 		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
1031 			fjes_hw_set_irqmask(hw,
1032 					    REG_ICTL_MASK_TXRX_STOP_DONE, true);
1033 
1034 		if (icr & REG_ICTL_MASK_INFO_UPDATE) {
1035 			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
1036 			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1037 				.recv_intr_zoneupdate += 1;
1038 		}
1039 
1040 		ret = IRQ_HANDLED;
1041 	} else {
1042 		ret = IRQ_NONE;
1043 	}
1044 
1045 	return ret;
1046 }
1047 
1048 static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
1049 				     int start_epid)
1050 {
1051 	struct fjes_hw *hw = &adapter->hw;
1052 	enum ep_partner_status pstatus;
1053 	int max_epid, cur_epid;
1054 	int i;
1055 
1056 	max_epid = hw->max_epid;
1057 	start_epid = (start_epid + 1 + max_epid) % max_epid;
1058 
1059 	for (i = 0; i < max_epid; i++) {
1060 		cur_epid = (start_epid + i) % max_epid;
1061 		if (cur_epid == hw->my_epid)
1062 			continue;
1063 
1064 		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
1065 		if (pstatus == EP_PARTNER_SHARED) {
1066 			if (!fjes_hw_epbuf_rx_is_empty(
1067 				&hw->ep_shm_info[cur_epid].rx))
1068 				return cur_epid;
1069 		}
1070 	}
1071 	return -1;
1072 }
1073 
1074 static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
1075 			      int *cur_epid)
1076 {
1077 	void *frame;
1078 
1079 	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
1080 	if (*cur_epid < 0)
1081 		return NULL;
1082 
1083 	frame =
1084 	fjes_hw_epbuf_rx_curpkt_get_addr(
1085 		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);
1086 
1087 	return frame;
1088 }
1089 
/* fjes_rxframe_release - advance @cur_epid's RX ring past the packet
 * just handed to the stack by fjes_rxframe_get().
 */
static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
{
	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}
1094 
1095 static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
1096 {
1097 	struct fjes_hw *hw = &adapter->hw;
1098 
1099 	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
1100 
1101 	adapter->unset_rx_last = true;
1102 	napi_schedule(&adapter->napi);
1103 }
1104 
/* fjes_poll - NAPI receive poll routine.
 *
 * Advertises FJES_RX_POLL_WORK to all shared peers, then drains up to
 * @budget frames from the shared RX buffers round-robin.  When fewer than
 * @budget frames were processed the poll completes; if traffic was seen
 * within the last few jiffies NAPI is rescheduled to keep polling,
 * otherwise FJES_RX_POLL_WORK is withdrawn and RX-data interrupts are
 * unmasked again.  Returns the number of frames delivered to the stack.
 */
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	/* tell every shared peer that we are actively polling */
	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	/* drain pending frames, at most @budget per poll */
	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				/* allocation failure: count the frame as
				 * dropped and errored, but still release it
				 * below so the buffer can advance
				 */
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_errors += 1;
			} else {
				/* copy out of the shared buffer into the skb */
				skb_put_data(skb, frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
								 .multicast += 1;
				}
			}

			/* advance past the frame whether or not it was
			 * delivered
			 */
			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* remember when traffic was last seen */
		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		/* keep polling for a short grace window after the last
		 * frame; only then fall back to interrupt-driven RX
		 */
		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_reschedule(napi);
		} else {
			/* withdraw the poll advertisement from all peers */
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			/* re-enable RX-data interrupts (masked in
			 * fjes_rx_irq())
			 */
			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}
1203 
1204 /* fjes_probe - Device Initialization Routine */
1205 static int fjes_probe(struct platform_device *plat_dev)
1206 {
1207 	struct fjes_adapter *adapter;
1208 	struct net_device *netdev;
1209 	struct resource *res;
1210 	struct fjes_hw *hw;
1211 	int err;
1212 
1213 	err = -ENOMEM;
1214 	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
1215 				 NET_NAME_UNKNOWN, fjes_netdev_setup,
1216 				 FJES_MAX_QUEUES);
1217 
1218 	if (!netdev)
1219 		goto err_out;
1220 
1221 	SET_NETDEV_DEV(netdev, &plat_dev->dev);
1222 
1223 	dev_set_drvdata(&plat_dev->dev, netdev);
1224 	adapter = netdev_priv(netdev);
1225 	adapter->netdev = netdev;
1226 	adapter->plat_dev = plat_dev;
1227 	hw = &adapter->hw;
1228 	hw->back = adapter;
1229 
1230 	/* setup the private structure */
1231 	err = fjes_sw_init(adapter);
1232 	if (err)
1233 		goto err_free_netdev;
1234 
1235 	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
1236 	adapter->force_reset = false;
1237 	adapter->open_guard = false;
1238 
1239 	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
1240 	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
1241 					      WQ_MEM_RECLAIM, 0);
1242 
1243 	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
1244 	INIT_WORK(&adapter->raise_intr_rxdata_task,
1245 		  fjes_raise_intr_rxdata_task);
1246 	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
1247 	adapter->unshare_watch_bitmask = 0;
1248 
1249 	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
1250 	adapter->interrupt_watch_enable = false;
1251 
1252 	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
1253 	hw->hw_res.start = res->start;
1254 	hw->hw_res.size = resource_size(res);
1255 	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
1256 	err = fjes_hw_init(&adapter->hw);
1257 	if (err)
1258 		goto err_free_netdev;
1259 
1260 	/* setup MAC address (02:00:00:00:00:[epid])*/
1261 	netdev->dev_addr[0] = 2;
1262 	netdev->dev_addr[1] = 0;
1263 	netdev->dev_addr[2] = 0;
1264 	netdev->dev_addr[3] = 0;
1265 	netdev->dev_addr[4] = 0;
1266 	netdev->dev_addr[5] = hw->my_epid; /* EPID */
1267 
1268 	err = register_netdev(netdev);
1269 	if (err)
1270 		goto err_hw_exit;
1271 
1272 	netif_carrier_off(netdev);
1273 
1274 	fjes_dbg_adapter_init(adapter);
1275 
1276 	return 0;
1277 
1278 err_hw_exit:
1279 	fjes_hw_exit(&adapter->hw);
1280 err_free_netdev:
1281 	free_netdev(netdev);
1282 err_out:
1283 	return err;
1284 }
1285 
/* fjes_remove - Device Removal Routine
 *
 * Reverses fjes_probe(): cancels every outstanding work item, destroys the
 * workqueues, unregisters the netdev, shuts the hardware down and frees the
 * net_device.  The ordering below matters: works are cancelled before their
 * workqueues are destroyed, and the netdev is unregistered before hw/napi
 * teardown.
 */
static int fjes_remove(struct platform_device *plat_dev)
{
	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	fjes_dbg_adapter_exit(adapter);

	/* quiesce all deferred work before tearing anything down */
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);
	if (adapter->control_wq)
		destroy_workqueue(adapter->control_wq);
	if (adapter->txrx_wq)
		destroy_workqueue(adapter->txrx_wq);

	unregister_netdev(netdev);

	fjes_hw_exit(hw);

	netif_napi_del(&adapter->napi);

	free_netdev(netdev);

	return 0;
}
1314 
1315 static int fjes_sw_init(struct fjes_adapter *adapter)
1316 {
1317 	struct net_device *netdev = adapter->netdev;
1318 
1319 	netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);
1320 
1321 	return 0;
1322 }
1323 
1324 /* fjes_netdev_setup - netdevice initialization routine */
1325 static void fjes_netdev_setup(struct net_device *netdev)
1326 {
1327 	ether_setup(netdev);
1328 
1329 	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
1330 	netdev->netdev_ops = &fjes_netdev_ops;
1331 	fjes_set_ethtool_ops(netdev);
1332 	netdev->mtu = fjes_support_mtu[3];
1333 	netdev->min_mtu = fjes_support_mtu[0];
1334 	netdev->max_mtu = fjes_support_mtu[3];
1335 	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1336 }
1337 
/* Periodic watchdog that polls the interrupt handler in case a hardware
 * interrupt was missed.  Invokes fjes_intr() by hand (with local IRQs
 * disabled to mimic interrupt context), schedules NAPI if any peer has
 * pending RX data, and re-arms itself while interrupt_watch_enable is set.
 */
static void fjes_irq_watch_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
			struct fjes_adapter, interrupt_watch_task);

	/* run the ISR manually; disable local IRQs so it executes under the
	 * same conditions as a real interrupt
	 */
	local_irq_disable();
	fjes_intr(adapter->hw.hw_res.irq, adapter);
	local_irq_enable();

	/* catch RX data the (possibly missed) interrupt should have flagged */
	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
		napi_schedule(&adapter->napi);

	if (adapter->interrupt_watch_enable) {
		if (!delayed_work_pending(&adapter->interrupt_watch_task))
			queue_delayed_work(adapter->control_wq,
					   &adapter->interrupt_watch_task,
					   FJES_IRQ_WATCH_DELAY);
	}
}
1357 
1358 static void fjes_watch_unshare_task(struct work_struct *work)
1359 {
1360 	struct fjes_adapter *adapter =
1361 	container_of(work, struct fjes_adapter, unshare_watch_task);
1362 
1363 	struct net_device *netdev = adapter->netdev;
1364 	struct fjes_hw *hw = &adapter->hw;
1365 
1366 	int unshare_watch, unshare_reserve;
1367 	int max_epid, my_epid, epidx;
1368 	int stop_req, stop_req_done;
1369 	ulong unshare_watch_bitmask;
1370 	unsigned long flags;
1371 	int wait_time = 0;
1372 	int is_shared;
1373 	int ret;
1374 
1375 	my_epid = hw->my_epid;
1376 	max_epid = hw->max_epid;
1377 
1378 	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
1379 	adapter->unshare_watch_bitmask = 0;
1380 
1381 	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
1382 	       (wait_time < 3000)) {
1383 		for (epidx = 0; epidx < max_epid; epidx++) {
1384 			if (epidx == my_epid)
1385 				continue;
1386 
1387 			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
1388 							   epidx);
1389 
1390 			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
1391 
1392 			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
1393 					FJES_RX_STOP_REQ_DONE;
1394 
1395 			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
1396 
1397 			unshare_reserve = test_bit(epidx,
1398 						   &hw->hw_info.buffer_unshare_reserve_bit);
1399 
1400 			if ((!stop_req ||
1401 			     (is_shared && (!is_shared || !stop_req_done))) &&
1402 			    (is_shared || !unshare_watch || !unshare_reserve))
1403 				continue;
1404 
1405 			mutex_lock(&hw->hw_info.lock);
1406 			ret = fjes_hw_unregister_buff_addr(hw, epidx);
1407 			switch (ret) {
1408 			case 0:
1409 				break;
1410 			case -ENOMSG:
1411 			case -EBUSY:
1412 			default:
1413 				if (!work_pending(
1414 					&adapter->force_close_task)) {
1415 					adapter->force_reset = true;
1416 					schedule_work(
1417 						&adapter->force_close_task);
1418 				}
1419 				break;
1420 			}
1421 			mutex_unlock(&hw->hw_info.lock);
1422 			hw->ep_shm_info[epidx].ep_stats
1423 					.com_unregist_buf_exec += 1;
1424 
1425 			spin_lock_irqsave(&hw->rx_status_lock, flags);
1426 			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1427 					    netdev->dev_addr, netdev->mtu);
1428 			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1429 
1430 			clear_bit(epidx, &hw->txrx_stop_req_bit);
1431 			clear_bit(epidx, &unshare_watch_bitmask);
1432 			clear_bit(epidx,
1433 				  &hw->hw_info.buffer_unshare_reserve_bit);
1434 		}
1435 
1436 		msleep(100);
1437 		wait_time += 100;
1438 	}
1439 
1440 	if (hw->hw_info.buffer_unshare_reserve_bit) {
1441 		for (epidx = 0; epidx < max_epid; epidx++) {
1442 			if (epidx == my_epid)
1443 				continue;
1444 
1445 			if (test_bit(epidx,
1446 				     &hw->hw_info.buffer_unshare_reserve_bit)) {
1447 				mutex_lock(&hw->hw_info.lock);
1448 
1449 				ret = fjes_hw_unregister_buff_addr(hw, epidx);
1450 				switch (ret) {
1451 				case 0:
1452 					break;
1453 				case -ENOMSG:
1454 				case -EBUSY:
1455 				default:
1456 					if (!work_pending(
1457 						&adapter->force_close_task)) {
1458 						adapter->force_reset = true;
1459 						schedule_work(
1460 							&adapter->force_close_task);
1461 					}
1462 					break;
1463 				}
1464 				mutex_unlock(&hw->hw_info.lock);
1465 
1466 				hw->ep_shm_info[epidx].ep_stats
1467 					.com_unregist_buf_exec += 1;
1468 
1469 				spin_lock_irqsave(&hw->rx_status_lock, flags);
1470 				fjes_hw_setup_epbuf(
1471 					&hw->ep_shm_info[epidx].tx,
1472 					netdev->dev_addr, netdev->mtu);
1473 				spin_unlock_irqrestore(&hw->rx_status_lock,
1474 						       flags);
1475 
1476 				clear_bit(epidx, &hw->txrx_stop_req_bit);
1477 				clear_bit(epidx, &unshare_watch_bitmask);
1478 				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
1479 			}
1480 
1481 			if (test_bit(epidx, &unshare_watch_bitmask)) {
1482 				spin_lock_irqsave(&hw->rx_status_lock, flags);
1483 				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
1484 						~FJES_RX_STOP_REQ_DONE;
1485 				spin_unlock_irqrestore(&hw->rx_status_lock,
1486 						       flags);
1487 			}
1488 		}
1489 	}
1490 }
1491 
1492 static acpi_status
1493 acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
1494 				 void *context, void **return_value)
1495 {
1496 	struct acpi_device *device;
1497 	bool *found = context;
1498 	int result;
1499 
1500 	result = acpi_bus_get_device(obj_handle, &device);
1501 	if (result)
1502 		return AE_OK;
1503 
1504 	if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
1505 		return AE_OK;
1506 
1507 	if (!is_extended_socket_device(device))
1508 		return AE_OK;
1509 
1510 	if (acpi_check_extended_socket_status(device))
1511 		return AE_OK;
1512 
1513 	*found = true;
1514 	return AE_CTRL_TERMINATE;
1515 }
1516 
1517 /* fjes_init_module - Driver Registration Routine */
1518 static int __init fjes_init_module(void)
1519 {
1520 	bool found = false;
1521 	int result;
1522 
1523 	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
1524 			    acpi_find_extended_socket_device, NULL, &found,
1525 			    NULL);
1526 
1527 	if (!found)
1528 		return -ENODEV;
1529 
1530 	pr_info("%s - version %s - %s\n",
1531 		fjes_driver_string, fjes_driver_version, fjes_copyright);
1532 
1533 	fjes_dbg_init();
1534 
1535 	result = platform_driver_register(&fjes_driver);
1536 	if (result < 0) {
1537 		fjes_dbg_exit();
1538 		return result;
1539 	}
1540 
1541 	result = acpi_bus_register_driver(&fjes_acpi_driver);
1542 	if (result < 0)
1543 		goto fail_acpi_driver;
1544 
1545 	return 0;
1546 
1547 fail_acpi_driver:
1548 	platform_driver_unregister(&fjes_driver);
1549 	fjes_dbg_exit();
1550 	return result;
1551 }
1552 
1553 module_init(fjes_init_module);
1554 
/* fjes_exit_module - Driver Exit Cleanup Routine */
static void __exit fjes_exit_module(void)
{
	/* unregister in the reverse order of fjes_init_module() */
	acpi_bus_unregister_driver(&fjes_acpi_driver);
	platform_driver_unregister(&fjes_driver);
	fjes_dbg_exit();
}
1562 
1563 module_exit(fjes_exit_module);
1564