xref: /linux/drivers/net/fjes/fjes_main.c (revision e6a901a00822659181c93c86d8bbc2a17779fddc)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  FUJITSU Extended Socket Network Device driver
4  *  Copyright (c) 2015 FUJITSU LIMITED
5  */
6 
7 #include <linux/module.h>
8 #include <linux/types.h>
9 #include <linux/nls.h>
10 #include <linux/platform_device.h>
11 #include <linux/netdevice.h>
12 #include <linux/interrupt.h>
13 
14 #include "fjes.h"
15 #include "fjes_trace.h"
16 
17 #define MAJ 1
18 #define MIN 2
19 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
20 #define DRV_NAME	"fjes"
21 char fjes_driver_name[] = DRV_NAME;
22 char fjes_driver_version[] = DRV_VERSION;
23 static const char fjes_driver_string[] =
24 		"FUJITSU Extended Socket Network Device Driver";
25 static const char fjes_copyright[] =
26 		"Copyright (c) 2015 FUJITSU LIMITED";
27 
28 MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
29 MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_VERSION);
32 
33 #define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
34 
35 static const struct acpi_device_id fjes_acpi_ids[] = {
36 	{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
37 	{"", 0},
38 };
39 MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
40 
41 static bool is_extended_socket_device(struct acpi_device *device)
42 {
43 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
44 	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
45 	union acpi_object *str;
46 	acpi_status status;
47 	int result;
48 
49 	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
50 	if (ACPI_FAILURE(status))
51 		return false;
52 
53 	str = buffer.pointer;
54 	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
55 				 str->string.length, UTF16_LITTLE_ENDIAN,
56 				 str_buf, sizeof(str_buf) - 1);
57 	str_buf[result] = 0;
58 
59 	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
60 		kfree(buffer.pointer);
61 		return false;
62 	}
63 	kfree(buffer.pointer);
64 
65 	return true;
66 }
67 
68 static int acpi_check_extended_socket_status(struct acpi_device *device)
69 {
70 	unsigned long long sta;
71 	acpi_status status;
72 
73 	status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
74 	if (ACPI_FAILURE(status))
75 		return -ENODEV;
76 
77 	if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
78 	      (sta & ACPI_STA_DEVICE_ENABLED) &&
79 	      (sta & ACPI_STA_DEVICE_UI) &&
80 	      (sta & ACPI_STA_DEVICE_FUNCTIONING)))
81 		return -ENODEV;
82 
83 	return 0;
84 }
85 
86 static acpi_status
87 fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
88 {
89 	struct acpi_resource_address32 *addr;
90 	struct acpi_resource_irq *irq;
91 	struct resource *res = data;
92 
93 	switch (acpi_res->type) {
94 	case ACPI_RESOURCE_TYPE_ADDRESS32:
95 		addr = &acpi_res->data.address32;
96 		res[0].start = addr->address.minimum;
97 		res[0].end = addr->address.minimum +
98 			addr->address.address_length - 1;
99 		break;
100 
101 	case ACPI_RESOURCE_TYPE_IRQ:
102 		irq = &acpi_res->data.irq;
103 		if (irq->interrupt_count != 1)
104 			return AE_ERROR;
105 		res[1].start = irq->interrupts[0];
106 		res[1].end = irq->interrupts[0];
107 		break;
108 
109 	default:
110 		break;
111 	}
112 
113 	return AE_OK;
114 }
115 
/* Platform resources (MMIO region, then IRQ) handed to the platform
 * device; placeholders until filled in from ACPI _CRS by
 * fjes_get_acpi_resource().
 */
static struct resource fjes_resource[] = {
	DEFINE_RES_MEM(0, 1),
	DEFINE_RES_IRQ(0)
};
120 
121 static int fjes_acpi_add(struct acpi_device *device)
122 {
123 	struct platform_device *plat_dev;
124 	acpi_status status;
125 
126 	if (!is_extended_socket_device(device))
127 		return -ENODEV;
128 
129 	if (acpi_check_extended_socket_status(device))
130 		return -ENODEV;
131 
132 	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
133 				     fjes_get_acpi_resource, fjes_resource);
134 	if (ACPI_FAILURE(status))
135 		return -ENODEV;
136 
137 	/* create platform_device */
138 	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
139 						   ARRAY_SIZE(fjes_resource));
140 	if (IS_ERR(plat_dev))
141 		return PTR_ERR(plat_dev);
142 
143 	device->driver_data = plat_dev;
144 
145 	return 0;
146 }
147 
/* ACPI .remove callback: tear down the platform device created by
 * fjes_acpi_add().
 */
static void fjes_acpi_remove(struct acpi_device *device)
{
	struct platform_device *plat_dev = acpi_driver_data(device);

	platform_device_unregister(plat_dev);
}
155 
/* ACPI driver binding for the PNP0C02 motherboard resource that
 * represents the extended-socket device.
 */
static struct acpi_driver fjes_acpi_driver = {
	.name = DRV_NAME,
	.class = DRV_NAME,
	.owner = THIS_MODULE,
	.ids = fjes_acpi_ids,
	.ops = {
		.add = fjes_acpi_add,
		.remove = fjes_acpi_remove,
	},
};
166 
/* fjes_setup_resources - negotiate shared-memory buffers with peer EPs
 * @adapter: board private structure
 *
 * Fetches zoning info from the hardware, notifies enabled peers of the
 * update, then registers this EP's buffer pair with every same-zone
 * partner.  Returns 0 on success or a negative error code; on failure
 * force_reset is set so the next open performs a hardware reset.
 */
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int result;
	int epidx;

	/* read each EP's zoning status/zone id under the hw_info mutex */
	mutex_lock(&hw->hw_info.lock);
	result = fjes_hw_request_info(hw);
	switch (result) {
	case 0:
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			hw->ep_shm_info[epidx].es_status =
			    hw->hw_info.res_buf->info.info[epidx].es_status;
			hw->ep_shm_info[epidx].zone =
			    hw->hw_info.res_buf->info.info[epidx].zone;
		}
		break;
	default:
	case -ENOMSG:
	case -EBUSY:
		adapter->force_reset = true;

		mutex_unlock(&hw->hw_info.lock);
		return result;
	}
	mutex_unlock(&hw->hw_info.lock);

	/* notify every enabled peer that zoning info has been updated */
	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if ((epidx != hw->my_epid) &&
		    (hw->ep_shm_info[epidx].es_status ==
		     FJES_ZONING_STATUS_ENABLE)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_INFO_UPDATE);
			hw->ep_shm_info[epidx].ep_stats
				.send_intr_zoneupdate += 1;
		}
	}

	/* give peers time to process the zone-update interrupts */
	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if (epidx == hw->my_epid)
			continue;

		buf_pair = &hw->ep_shm_info[epidx];

		/* (re)initialize our TX buffer header for this partner */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
				    netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
			mutex_lock(&hw->hw_info.lock);
			result =
			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
			mutex_unlock(&hw->hw_info.lock);

			switch (result) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				adapter->force_reset = true;
				return result;
			}

			hw->ep_shm_info[epidx].ep_stats
				.com_regist_buf_exec += 1;
		}
	}

	return 0;
}
244 
245 static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
246 {
247 	struct fjes_hw *hw = &adapter->hw;
248 
249 	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
250 
251 	adapter->unset_rx_last = true;
252 	napi_schedule(&adapter->napi);
253 }
254 
/* fjes_stop_req_irq - handle a device-stop request interrupt from a peer
 * @adapter: board private structure
 * @src_epid: EP that raised the interrupt
 *
 * Reserves the peer's buffer for unsharing, then dispatches the
 * matching teardown work depending on the current partner state.
 */
static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_WAITING:
		/* ack the pending TX/RX stop request, then fall through
		 * to schedule the unshare watch
		 */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		clear_bit(src_epid, &hw->txrx_stop_req_bit);
		fallthrough;
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		set_bit(src_epid, &adapter->unshare_watch_bitmask);
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
		break;
	case EP_PARTNER_SHARED:
		/* buffers still shared: request an EP stop instead */
		set_bit(src_epid, &hw->epstop_req_bit);

		if (!work_pending(&hw->epstop_task))
			queue_work(adapter->control_wq, &hw->epstop_task);
		break;
	}
	trace_fjes_stop_req_irq_post(hw, src_epid);
}
290 
/* fjes_txrx_stop_req_irq - handle a TX/RX stop request from a peer
 * @adapter: board private structure
 * @src_epid: EP that raised the interrupt
 *
 * Reacts according to the partner state: in WAITING the request is
 * acked only when the requesting peer has the lower epid (tie-break for
 * simultaneous requests); in SHARED an EP stop is scheduled when the
 * peer's RX side carries FJES_RX_STOP_REQ_REQUEST.
 */
static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
				   int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		break;
	case EP_PARTNER_WAITING:
		/* both sides requested a stop: lower-epid peer gets the ack */
		if (src_epid < hw->my_epid) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(src_epid, &hw->txrx_stop_req_bit);
			set_bit(src_epid, &adapter->unshare_watch_bitmask);

			if (!work_pending(&adapter->unshare_watch_task))
				queue_work(adapter->control_wq,
					   &adapter->unshare_watch_task);
		}
		break;
	case EP_PARTNER_SHARED:
		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
		    FJES_RX_STOP_REQ_REQUEST) {
			set_bit(src_epid, &hw->epstop_req_bit);
			if (!work_pending(&hw->epstop_task))
				queue_work(adapter->control_wq,
					   &hw->epstop_task);
		}
		break;
	}
	trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
}
332 
333 static void fjes_update_zone_irq(struct fjes_adapter *adapter,
334 				 int src_epid)
335 {
336 	struct fjes_hw *hw = &adapter->hw;
337 
338 	if (!work_pending(&hw->update_zone_task))
339 		queue_work(adapter->control_wq, &hw->update_zone_task);
340 }
341 
342 static irqreturn_t fjes_intr(int irq, void *data)
343 {
344 	struct fjes_adapter *adapter = data;
345 	struct fjes_hw *hw = &adapter->hw;
346 	irqreturn_t ret;
347 	u32 icr;
348 
349 	icr = fjes_hw_capture_interrupt_status(hw);
350 
351 	if (icr & REG_IS_MASK_IS_ASSERT) {
352 		if (icr & REG_ICTL_MASK_RX_DATA) {
353 			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
354 			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
355 				.recv_intr_rx += 1;
356 		}
357 
358 		if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
359 			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
360 			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
361 				.recv_intr_stop += 1;
362 		}
363 
364 		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
365 			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
366 			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
367 				.recv_intr_unshare += 1;
368 		}
369 
370 		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
371 			fjes_hw_set_irqmask(hw,
372 					    REG_ICTL_MASK_TXRX_STOP_DONE, true);
373 
374 		if (icr & REG_ICTL_MASK_INFO_UPDATE) {
375 			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
376 			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
377 				.recv_intr_zoneupdate += 1;
378 		}
379 
380 		ret = IRQ_HANDLED;
381 	} else {
382 		ret = IRQ_NONE;
383 	}
384 
385 	return ret;
386 }
387 
/* fjes_request_irq - start the interrupt watchdog and register the
 * shared IRQ handler for the device.
 *
 * Returns the request_irq() result.  NOTE(review): when the IRQ is
 * already registered this returns the initial -1, which callers treat
 * as failure — looks only reachable if open() is entered with the IRQ
 * still held; confirm before reusing this helper elsewhere.
 */
static int fjes_request_irq(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int result = -1;

	adapter->interrupt_watch_enable = true;
	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
		queue_delayed_work(adapter->control_wq,
				   &adapter->interrupt_watch_task,
				   FJES_IRQ_WATCH_DELAY);
	}

	if (!adapter->irq_registered) {
		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
				     IRQF_SHARED, netdev->name, adapter);
		if (result)
			adapter->irq_registered = false;
		else
			adapter->irq_registered = true;
	}

	return result;
}
411 
412 static void fjes_free_irq(struct fjes_adapter *adapter)
413 {
414 	struct fjes_hw *hw = &adapter->hw;
415 
416 	adapter->interrupt_watch_enable = false;
417 	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
418 
419 	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
420 
421 	if (adapter->irq_registered) {
422 		free_irq(adapter->hw.hw_res.irq, adapter);
423 		adapter->irq_registered = false;
424 	}
425 }
426 
/* fjes_free_resources - unregister shared buffers from all partner EPs
 * @adapter: board private structure
 *
 * Unregisters this EP's buffer pair from every partner; if any
 * unregister failed (or a reset was previously forced), resets the
 * hardware and reprograms its command registers.
 */
static void fjes_free_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	bool reset_flag = false;
	unsigned long flags;
	int result;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		mutex_lock(&hw->hw_info.lock);
		result = fjes_hw_unregister_buff_addr(hw, epidx);
		mutex_unlock(&hw->hw_info.lock);

		hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;

		/* any failed unregister forces a full hardware reset below */
		if (result)
			reset_flag = true;

		buf_pair = &hw->ep_shm_info[epidx];

		/* reinitialize our TX buffer header for this partner */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx,
				    netdev->dev_addr, netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		clear_bit(epidx, &hw->txrx_stop_req_bit);
	}

	if (reset_flag || adapter->force_reset) {
		result = fjes_hw_reset(hw);

		adapter->force_reset = false;

		/* a failed reset leaves the device unusable; block open() */
		if (result)
			adapter->open_guard = true;

		hw->hw_info.buffer_share_bit = 0;

		memset((void *)&param, 0, sizeof(param));

		/* hand the request/response/share buffers back to the hw */
		param.req_len = hw->hw_info.req_buf_size;
		param.req_start = __pa(hw->hw_info.req_buf);
		param.res_len = hw->hw_info.res_buf_size;
		param.res_start = __pa(hw->hw_info.res_buf);
		param.share_start = __pa(hw->hw_info.share->ep_status);

		fjes_hw_init_command_registers(hw, &param);
	}
}
482 
/* fjes_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Sets up shared-memory resources, enables NAPI, requests the IRQ and
 * unmasks interrupts before starting the TX queues.  Returns 0 on
 * success, a negative errno otherwise.
 */
static int fjes_open(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	int result;

	/* set when a previous hardware reset failed; refuse to open */
	if (adapter->open_guard)
		return -ENXIO;

	result = fjes_setup_resources(adapter);
	if (result)
		goto err_setup_res;

	hw->txrx_stop_req_bit = 0;
	hw->epstop_req_bit = 0;

	napi_enable(&adapter->napi);

	/* read (and thereby consume) any stale interrupt status before
	 * unmasking
	 */
	fjes_hw_capture_interrupt_status(hw);

	result = fjes_request_irq(adapter);
	if (result)
		goto err_req_irq;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);

	netif_tx_start_all_queues(netdev);
	netif_carrier_on(netdev);

	return 0;

err_req_irq:
	fjes_free_irq(adapter);
	napi_disable(&adapter->napi);

err_setup_res:
	fjes_free_resources(adapter);
	return result;
}
523 
/* fjes_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Stops TX, signals EP stop to peers, quiesces NAPI and every deferred
 * worker, then tears down the shared-memory resources.  Always
 * returns 0.
 */
static int fjes_close(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int epidx;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	fjes_hw_raise_epstop(hw);

	napi_disable(&adapter->napi);

	/* tell shared partners we are no longer polling for RX */
	spin_lock_irqsave(&hw->rx_status_lock, flags);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status &=
				~FJES_RX_POLL_WORK;
	}
	spin_unlock_irqrestore(&hw->rx_status_lock, flags);

	fjes_free_irq(adapter);

	/* flush every outstanding worker before freeing resources */
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	adapter->unshare_watch_bitmask = 0;
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);

	fjes_hw_wait_epstop(hw);

	fjes_free_resources(adapter);

	return 0;
}
569 
570 static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
571 			void *data, size_t len)
572 {
573 	int retval;
574 
575 	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
576 					   data, len);
577 	if (retval)
578 		return retval;
579 
580 	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
581 		FJES_TX_DELAY_SEND_PENDING;
582 	if (!work_pending(&adapter->raise_intr_rxdata_task))
583 		queue_work(adapter->txrx_wq,
584 			   &adapter->raise_intr_rxdata_task);
585 
586 	retval = 0;
587 	return retval;
588 }
589 
590 static netdev_tx_t
591 fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
592 {
593 	struct fjes_adapter *adapter = netdev_priv(netdev);
594 	struct fjes_hw *hw = &adapter->hw;
595 
596 	int max_epid, my_epid, dest_epid;
597 	enum ep_partner_status pstatus;
598 	struct netdev_queue *cur_queue;
599 	char shortpkt[VLAN_ETH_HLEN];
600 	bool is_multi, vlan;
601 	struct ethhdr *eth;
602 	u16 queue_no = 0;
603 	u16 vlan_id = 0;
604 	netdev_tx_t ret;
605 	char *data;
606 	int len;
607 
608 	ret = NETDEV_TX_OK;
609 	is_multi = false;
610 	cur_queue = netdev_get_tx_queue(netdev, queue_no);
611 
612 	eth = (struct ethhdr *)skb->data;
613 	my_epid = hw->my_epid;
614 
615 	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;
616 
617 	data = skb->data;
618 	len = skb->len;
619 
620 	if (is_multicast_ether_addr(eth->h_dest)) {
621 		dest_epid = 0;
622 		max_epid = hw->max_epid;
623 		is_multi = true;
624 	} else if (is_local_ether_addr(eth->h_dest)) {
625 		dest_epid = eth->h_dest[ETH_ALEN - 1];
626 		max_epid = dest_epid + 1;
627 
628 		if ((eth->h_dest[0] == 0x02) &&
629 		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
630 			      eth->h_dest[3] | eth->h_dest[4])) &&
631 		    (dest_epid < hw->max_epid)) {
632 			;
633 		} else {
634 			dest_epid = 0;
635 			max_epid = 0;
636 			ret = NETDEV_TX_OK;
637 
638 			adapter->stats64.tx_packets += 1;
639 			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
640 			adapter->stats64.tx_bytes += len;
641 			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
642 		}
643 	} else {
644 		dest_epid = 0;
645 		max_epid = 0;
646 		ret = NETDEV_TX_OK;
647 
648 		adapter->stats64.tx_packets += 1;
649 		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
650 		adapter->stats64.tx_bytes += len;
651 		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
652 	}
653 
654 	for (; dest_epid < max_epid; dest_epid++) {
655 		if (my_epid == dest_epid)
656 			continue;
657 
658 		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
659 		if (pstatus != EP_PARTNER_SHARED) {
660 			if (!is_multi)
661 				hw->ep_shm_info[dest_epid].ep_stats
662 					.tx_dropped_not_shared += 1;
663 			ret = NETDEV_TX_OK;
664 		} else if (!fjes_hw_check_epbuf_version(
665 				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
666 			/* version is NOT 0 */
667 			adapter->stats64.tx_carrier_errors += 1;
668 			hw->ep_shm_info[dest_epid].net_stats
669 						.tx_carrier_errors += 1;
670 			hw->ep_shm_info[dest_epid].ep_stats
671 					.tx_dropped_ver_mismatch += 1;
672 
673 			ret = NETDEV_TX_OK;
674 		} else if (!fjes_hw_check_mtu(
675 				&adapter->hw.ep_shm_info[dest_epid].rx,
676 				netdev->mtu)) {
677 			adapter->stats64.tx_dropped += 1;
678 			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
679 			adapter->stats64.tx_errors += 1;
680 			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
681 			hw->ep_shm_info[dest_epid].ep_stats
682 					.tx_dropped_buf_size_mismatch += 1;
683 
684 			ret = NETDEV_TX_OK;
685 		} else if (vlan &&
686 			   !fjes_hw_check_vlan_id(
687 				&adapter->hw.ep_shm_info[dest_epid].rx,
688 				vlan_id)) {
689 			hw->ep_shm_info[dest_epid].ep_stats
690 				.tx_dropped_vlanid_mismatch += 1;
691 			ret = NETDEV_TX_OK;
692 		} else {
693 			if (len < VLAN_ETH_HLEN) {
694 				memset(shortpkt, 0, VLAN_ETH_HLEN);
695 				memcpy(shortpkt, skb->data, skb->len);
696 				len = VLAN_ETH_HLEN;
697 				data = shortpkt;
698 			}
699 
700 			if (adapter->tx_retry_count == 0) {
701 				adapter->tx_start_jiffies = jiffies;
702 				adapter->tx_retry_count = 1;
703 			} else {
704 				adapter->tx_retry_count++;
705 			}
706 
707 			if (fjes_tx_send(adapter, dest_epid, data, len)) {
708 				if (is_multi) {
709 					ret = NETDEV_TX_OK;
710 				} else if (
711 					   ((long)jiffies -
712 					    (long)adapter->tx_start_jiffies) >=
713 					    FJES_TX_RETRY_TIMEOUT) {
714 					adapter->stats64.tx_fifo_errors += 1;
715 					hw->ep_shm_info[dest_epid].net_stats
716 								.tx_fifo_errors += 1;
717 					adapter->stats64.tx_errors += 1;
718 					hw->ep_shm_info[dest_epid].net_stats
719 								.tx_errors += 1;
720 
721 					ret = NETDEV_TX_OK;
722 				} else {
723 					netif_trans_update(netdev);
724 					hw->ep_shm_info[dest_epid].ep_stats
725 						.tx_buffer_full += 1;
726 					netif_tx_stop_queue(cur_queue);
727 
728 					if (!work_pending(&adapter->tx_stall_task))
729 						queue_work(adapter->txrx_wq,
730 							   &adapter->tx_stall_task);
731 
732 					ret = NETDEV_TX_BUSY;
733 				}
734 			} else {
735 				if (!is_multi) {
736 					adapter->stats64.tx_packets += 1;
737 					hw->ep_shm_info[dest_epid].net_stats
738 								.tx_packets += 1;
739 					adapter->stats64.tx_bytes += len;
740 					hw->ep_shm_info[dest_epid].net_stats
741 								.tx_bytes += len;
742 				}
743 
744 				adapter->tx_retry_count = 0;
745 				ret = NETDEV_TX_OK;
746 			}
747 		}
748 	}
749 
750 	if (ret == NETDEV_TX_OK) {
751 		dev_kfree_skb(skb);
752 		if (is_multi) {
753 			adapter->stats64.tx_packets += 1;
754 			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
755 			adapter->stats64.tx_bytes += 1;
756 			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
757 		}
758 	}
759 
760 	return ret;
761 }
762 
763 static void
764 fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
765 {
766 	struct fjes_adapter *adapter = netdev_priv(netdev);
767 
768 	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
769 }
770 
/* fjes_change_mtu - change the MTU, restricted to the sizes listed in
 * fjes_support_mtu[] (a request is rounded up to the next supported
 * size).  When the interface is running, RX is quiesced, every partner
 * TX buffer is rebuilt for the new size and the queues are restarted.
 */
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	/* round the request up to the first supported MTU */
	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		/* announce to partners that an MTU change is in progress */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		/* allow in-flight traffic to drain before the rebuild */
		msleep(1000);

		/* NOTE(review): the queues were already stopped above;
		 * this second stop looks redundant — confirm before
		 * removing
		 */
		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			/* rebuild the TX buffer layout for the new MTU */
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}
839 
/* ndo_tx_timeout callback: wake queue 0 so the stack retries the send.
 * @txqueue is ignored; this device uses a single TX queue.
 */
static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
{
	netif_tx_wake_queue(netdev_get_tx_queue(netdev, 0));
}
846 
847 static int fjes_vlan_rx_add_vid(struct net_device *netdev,
848 				__be16 proto, u16 vid)
849 {
850 	struct fjes_adapter *adapter = netdev_priv(netdev);
851 	bool ret = true;
852 	int epid;
853 
854 	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
855 		if (epid == adapter->hw.my_epid)
856 			continue;
857 
858 		if (!fjes_hw_check_vlan_id(
859 			&adapter->hw.ep_shm_info[epid].tx, vid))
860 			ret = fjes_hw_set_vlan_id(
861 				&adapter->hw.ep_shm_info[epid].tx, vid);
862 	}
863 
864 	return ret ? 0 : -ENOSPC;
865 }
866 
867 static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
868 				 __be16 proto, u16 vid)
869 {
870 	struct fjes_adapter *adapter = netdev_priv(netdev);
871 	int epid;
872 
873 	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
874 		if (epid == adapter->hw.my_epid)
875 			continue;
876 
877 		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
878 	}
879 
880 	return 0;
881 }
882 
/* net_device callbacks for the extended-socket interface */
static const struct net_device_ops fjes_netdev_ops = {
	.ndo_open		= fjes_open,
	.ndo_stop		= fjes_close,
	.ndo_start_xmit		= fjes_xmit_frame,
	.ndo_get_stats64	= fjes_get_stats64,
	.ndo_change_mtu		= fjes_change_mtu,
	.ndo_tx_timeout		= fjes_tx_retry,
	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
};
893 
/* fjes_netdev_setup - netdevice initialization routine */
static void fjes_netdev_setup(struct net_device *netdev)
{
	ether_setup(netdev);

	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
	netdev->netdev_ops = &fjes_netdev_ops;
	fjes_set_ethtool_ops(netdev);
	/* default MTU equals the maximum; index 3 is presumably the last
	 * entry of fjes_support_mtu[] — confirm against fjes.h
	 */
	netdev->mtu = fjes_support_mtu[3];
	netdev->min_mtu = fjes_support_mtu[0];
	netdev->max_mtu = fjes_support_mtu[3];
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
907 
908 static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
909 				     int start_epid)
910 {
911 	struct fjes_hw *hw = &adapter->hw;
912 	enum ep_partner_status pstatus;
913 	int max_epid, cur_epid;
914 	int i;
915 
916 	max_epid = hw->max_epid;
917 	start_epid = (start_epid + 1 + max_epid) % max_epid;
918 
919 	for (i = 0; i < max_epid; i++) {
920 		cur_epid = (start_epid + i) % max_epid;
921 		if (cur_epid == hw->my_epid)
922 			continue;
923 
924 		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
925 		if (pstatus == EP_PARTNER_SHARED) {
926 			if (!fjes_hw_epbuf_rx_is_empty(
927 				&hw->ep_shm_info[cur_epid].rx))
928 				return cur_epid;
929 		}
930 	}
931 	return -1;
932 }
933 
934 static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
935 			      int *cur_epid)
936 {
937 	void *frame;
938 
939 	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
940 	if (*cur_epid < 0)
941 		return NULL;
942 
943 	frame =
944 	fjes_hw_epbuf_rx_curpkt_get_addr(
945 		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);
946 
947 	return frame;
948 }
949 
/* Drop the current packet from @cur_epid's RX ring once it has been
 * consumed (or could not be copied into an skb).
 */
static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
{
	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}
954 
/* fjes_poll - NAPI poll routine
 * @napi: NAPI structure
 * @budget: maximum number of frames to receive this round
 *
 * Advertises FJES_RX_POLL_WORK to shared partners, drains frames from
 * the partner RX rings round-robin into skbs, and once the rings stay
 * quiet withdraws the poll flag and re-enables the RX interrupt.
 */
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	/* tell shared partners we are actively polling */
	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				/* no skb: account the drop; the frame is
				 * still released below and thus lost
				 */
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_errors += 1;
			} else {
				skb_put_data(skb, frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
								 .multicast += 1;
				}
			}

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		/* keep polling briefly after the last frame to catch
		 * back-to-back traffic without an interrupt round-trip
		 */
		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_schedule(napi);
		} else {
			/* withdraw the poll flag and re-arm the RX irq */
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}
1053 
1054 static int fjes_sw_init(struct fjes_adapter *adapter)
1055 {
1056 	struct net_device *netdev = adapter->netdev;
1057 
1058 	netif_napi_add(netdev, &adapter->napi, fjes_poll);
1059 
1060 	return 0;
1061 }
1062 
1063 static void fjes_force_close_task(struct work_struct *work)
1064 {
1065 	struct fjes_adapter *adapter = container_of(work,
1066 			struct fjes_adapter, force_close_task);
1067 	struct net_device *netdev = adapter->netdev;
1068 
1069 	rtnl_lock();
1070 	dev_close(netdev);
1071 	rtnl_unlock();
1072 }
1073 
/* fjes_tx_stall_task - worker deciding when a stalled TX queue may be
 * woken again.
 *
 * Wakes the queue immediately once the stall has lasted longer than
 * FJES_TX_TX_STALL_TIMEOUT; otherwise polls the partner TX rings a few
 * times and wakes the queue when all of them have room.  If some ring
 * is still full, the work requeues itself.
 */
static void fjes_tx_stall_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, tx_stall_task);
	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;
	int all_queue_available, sendable;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;
	union ep_buffer_info *info;
	int i;

	/* stalled too long: stop waiting on the rings and wake TX */
	if (((long)jiffies -
		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
		netif_wake_queue(netdev);
		return;
	}

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (i = 0; i < 5; i++) {
		all_queue_available = 1;

		for (epid = 0; epid < max_epid; epid++) {
			if (my_epid == epid)
				continue;

			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
			sendable = (pstatus == EP_PARTNER_SHARED);
			if (!sendable)
				continue;

			info = adapter->hw.ep_shm_info[epid].tx.info;

			/* partner is mid-MTU-change: do not wake yet */
			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
				return;

			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
					 info->v1i.count_max)) {
				all_queue_available = 0;
				break;
			}
		}

		if (all_queue_available) {
			netif_wake_queue(netdev);
			return;
		}
	}

	usleep_range(50, 100);

	/* some ring still full: try again from the workqueue */
	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}
1129 
/* fjes_raise_intr_rxdata_task - deferred worker that flushes delayed-send
 * notifications to partner EPs.
 *
 * Pass 1 snapshots each shared partner's tx_status into tx_status_work and
 * clears PENDING in shared memory; pass 2 raises an RX_DATA interrupt for
 * every partner whose snapshot was PENDING and that is not already polling
 * (FJES_RX_POLL_WORK clear).  Splitting snapshot and notify keeps the
 * clear-then-interrupt ordering consistent across all partners.
 */
static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, raise_intr_rxdata_task);
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	/* Reset the per-EP snapshots from any previous run. */
	for (epid = 0; epid < max_epid; epid++)
		hw->ep_shm_info[epid].tx_status_work = 0;

	/* Pass 1: snapshot tx_status and acknowledge PENDING in shared
	 * memory before any interrupt is raised.
	 */
	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if (pstatus == EP_PARTNER_SHARED) {
			hw->ep_shm_info[epid].tx_status_work =
				hw->ep_shm_info[epid].tx.info->v1i.tx_status;

			if (hw->ep_shm_info[epid].tx_status_work ==
				FJES_TX_DELAY_SEND_PENDING) {
				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
					FJES_TX_DELAY_SEND_NONE;
			}
		}
	}

	/* Pass 2: interrupt partners that had a pending delayed send and
	 * are not already polling their RX ring.
	 */
	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if ((hw->ep_shm_info[epid].tx_status_work ==
		     FJES_TX_DELAY_SEND_PENDING) &&
		    (pstatus == EP_PARTNER_SHARED) &&
		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
		      FJES_RX_POLL_WORK)) {
			fjes_hw_raise_interrupt(hw, epid,
						REG_ICTL_MASK_RX_DATA);
			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
		}
	}

	/* Brief pacing delay before the worker can run again.
	 * NOTE(review): sleeping at the tail of a work item -- presumably
	 * rate-limits back-to-back requeues; confirm intent.
	 */
	usleep_range(500, 1000);
}
1179 
1180 static void fjes_watch_unshare_task(struct work_struct *work)
1181 {
1182 	struct fjes_adapter *adapter =
1183 	container_of(work, struct fjes_adapter, unshare_watch_task);
1184 
1185 	struct net_device *netdev = adapter->netdev;
1186 	struct fjes_hw *hw = &adapter->hw;
1187 
1188 	int unshare_watch, unshare_reserve;
1189 	int max_epid, my_epid, epidx;
1190 	int stop_req, stop_req_done;
1191 	ulong unshare_watch_bitmask;
1192 	unsigned long flags;
1193 	int wait_time = 0;
1194 	int is_shared;
1195 	int ret;
1196 
1197 	my_epid = hw->my_epid;
1198 	max_epid = hw->max_epid;
1199 
1200 	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
1201 	adapter->unshare_watch_bitmask = 0;
1202 
1203 	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
1204 	       (wait_time < 3000)) {
1205 		for (epidx = 0; epidx < max_epid; epidx++) {
1206 			if (epidx == my_epid)
1207 				continue;
1208 
1209 			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
1210 							   epidx);
1211 
1212 			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
1213 
1214 			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
1215 					FJES_RX_STOP_REQ_DONE;
1216 
1217 			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
1218 
1219 			unshare_reserve = test_bit(epidx,
1220 						   &hw->hw_info.buffer_unshare_reserve_bit);
1221 
1222 			if ((!stop_req ||
1223 			     (is_shared && (!is_shared || !stop_req_done))) &&
1224 			    (is_shared || !unshare_watch || !unshare_reserve))
1225 				continue;
1226 
1227 			mutex_lock(&hw->hw_info.lock);
1228 			ret = fjes_hw_unregister_buff_addr(hw, epidx);
1229 			switch (ret) {
1230 			case 0:
1231 				break;
1232 			case -ENOMSG:
1233 			case -EBUSY:
1234 			default:
1235 				if (!work_pending(
1236 					&adapter->force_close_task)) {
1237 					adapter->force_reset = true;
1238 					schedule_work(
1239 						&adapter->force_close_task);
1240 				}
1241 				break;
1242 			}
1243 			mutex_unlock(&hw->hw_info.lock);
1244 			hw->ep_shm_info[epidx].ep_stats
1245 					.com_unregist_buf_exec += 1;
1246 
1247 			spin_lock_irqsave(&hw->rx_status_lock, flags);
1248 			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1249 					    netdev->dev_addr, netdev->mtu);
1250 			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1251 
1252 			clear_bit(epidx, &hw->txrx_stop_req_bit);
1253 			clear_bit(epidx, &unshare_watch_bitmask);
1254 			clear_bit(epidx,
1255 				  &hw->hw_info.buffer_unshare_reserve_bit);
1256 		}
1257 
1258 		msleep(100);
1259 		wait_time += 100;
1260 	}
1261 
1262 	if (hw->hw_info.buffer_unshare_reserve_bit) {
1263 		for (epidx = 0; epidx < max_epid; epidx++) {
1264 			if (epidx == my_epid)
1265 				continue;
1266 
1267 			if (test_bit(epidx,
1268 				     &hw->hw_info.buffer_unshare_reserve_bit)) {
1269 				mutex_lock(&hw->hw_info.lock);
1270 
1271 				ret = fjes_hw_unregister_buff_addr(hw, epidx);
1272 				switch (ret) {
1273 				case 0:
1274 					break;
1275 				case -ENOMSG:
1276 				case -EBUSY:
1277 				default:
1278 					if (!work_pending(
1279 						&adapter->force_close_task)) {
1280 						adapter->force_reset = true;
1281 						schedule_work(
1282 							&adapter->force_close_task);
1283 					}
1284 					break;
1285 				}
1286 				mutex_unlock(&hw->hw_info.lock);
1287 
1288 				hw->ep_shm_info[epidx].ep_stats
1289 					.com_unregist_buf_exec += 1;
1290 
1291 				spin_lock_irqsave(&hw->rx_status_lock, flags);
1292 				fjes_hw_setup_epbuf(
1293 					&hw->ep_shm_info[epidx].tx,
1294 					netdev->dev_addr, netdev->mtu);
1295 				spin_unlock_irqrestore(&hw->rx_status_lock,
1296 						       flags);
1297 
1298 				clear_bit(epidx, &hw->txrx_stop_req_bit);
1299 				clear_bit(epidx, &unshare_watch_bitmask);
1300 				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
1301 			}
1302 
1303 			if (test_bit(epidx, &unshare_watch_bitmask)) {
1304 				spin_lock_irqsave(&hw->rx_status_lock, flags);
1305 				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
1306 						~FJES_RX_STOP_REQ_DONE;
1307 				spin_unlock_irqrestore(&hw->rx_status_lock,
1308 						       flags);
1309 			}
1310 		}
1311 	}
1312 }
1313 
1314 static void fjes_irq_watch_task(struct work_struct *work)
1315 {
1316 	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
1317 			struct fjes_adapter, interrupt_watch_task);
1318 
1319 	local_irq_disable();
1320 	fjes_intr(adapter->hw.hw_res.irq, adapter);
1321 	local_irq_enable();
1322 
1323 	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
1324 		napi_schedule(&adapter->napi);
1325 
1326 	if (adapter->interrupt_watch_enable) {
1327 		if (!delayed_work_pending(&adapter->interrupt_watch_task))
1328 			queue_delayed_work(adapter->control_wq,
1329 					   &adapter->interrupt_watch_task,
1330 					   FJES_IRQ_WATCH_DELAY);
1331 	}
1332 }
1333 
1334 /* fjes_probe - Device Initialization Routine */
/* fjes_probe - Device Initialization Routine
 *
 * Allocates the net_device and adapter, creates the txrx/control
 * workqueues, maps the platform MMIO/IRQ resources, initializes the
 * hardware, assigns the locally-administered MAC (02:00:00:00:00:EPID)
 * and registers the netdev.  Returns 0 on success or a negative errno;
 * all partially acquired resources are released via the goto ladder.
 */
static int fjes_probe(struct platform_device *plat_dev)
{
	struct fjes_adapter *adapter;
	struct net_device *netdev;
	struct resource *res;
	struct fjes_hw *hw;
	u8 addr[ETH_ALEN];
	int err;

	err = -ENOMEM;
	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
				 NET_NAME_UNKNOWN, fjes_netdev_setup,
				 FJES_MAX_QUEUES);

	if (!netdev)
		goto err_out;

	SET_NETDEV_DEV(netdev, &plat_dev->dev);

	dev_set_drvdata(&plat_dev->dev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->plat_dev = plat_dev;
	hw = &adapter->hw;
	hw->back = adapter;

	/* setup the private structure */
	err = fjes_sw_init(adapter);
	if (err)
		goto err_free_netdev;

	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
	adapter->force_reset = false;
	adapter->open_guard = false;

	/* WQ_MEM_RECLAIM: these queues may be flushed on the TX path. */
	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
	if (unlikely(!adapter->txrx_wq)) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
					      WQ_MEM_RECLAIM, 0);
	if (unlikely(!adapter->control_wq)) {
		err = -ENOMEM;
		goto err_free_txrx_wq;
	}

	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
	INIT_WORK(&adapter->raise_intr_rxdata_task,
		  fjes_raise_intr_rxdata_task);
	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
	adapter->unshare_watch_bitmask = 0;

	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
	adapter->interrupt_watch_enable = false;

	/* Pick up the shared-memory window and IRQ from the platform dev. */
	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -EINVAL;
		goto err_free_control_wq;
	}
	hw->hw_res.start = res->start;
	hw->hw_res.size = resource_size(res);
	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
	if (hw->hw_res.irq < 0) {
		err = hw->hw_res.irq;
		goto err_free_control_wq;
	}

	err = fjes_hw_init(&adapter->hw);
	if (err)
		goto err_free_control_wq;

	/* setup MAC address (02:00:00:00:00:[epid])*/
	addr[0] = 2;
	addr[1] = 0;
	addr[2] = 0;
	addr[3] = 0;
	addr[4] = 0;
	addr[5] = hw->my_epid; /* EPID */
	eth_hw_addr_set(netdev, addr);

	err = register_netdev(netdev);
	if (err)
		goto err_hw_exit;

	/* Link state is driven by partner status; start disconnected. */
	netif_carrier_off(netdev);

	fjes_dbg_adapter_init(adapter);

	return 0;

err_hw_exit:
	fjes_hw_exit(&adapter->hw);
err_free_control_wq:
	destroy_workqueue(adapter->control_wq);
err_free_txrx_wq:
	destroy_workqueue(adapter->txrx_wq);
err_free_netdev:
	free_netdev(netdev);
err_out:
	return err;
}
1439 
1440 /* fjes_remove - Device Removal Routine */
/* fjes_remove - Device Removal Routine
 *
 * Mirrors fjes_probe(): tears down debugfs, cancels all deferred work,
 * destroys the workqueues, unregisters the netdev, shuts down the
 * hardware and finally frees the net_device.
 */
static void fjes_remove(struct platform_device *plat_dev)
{
	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	fjes_dbg_adapter_exit(adapter);

	/* Stop every worker before its workqueue is destroyed. */
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);
	if (adapter->control_wq)
		destroy_workqueue(adapter->control_wq);
	if (adapter->txrx_wq)
		destroy_workqueue(adapter->txrx_wq);

	unregister_netdev(netdev);

	fjes_hw_exit(hw);

	netif_napi_del(&adapter->napi);

	free_netdev(netdev);
}
1466 
/* Platform driver glue binding fjes_probe/fjes_remove to the "fjes"
 * platform device (created from the ACPI Extended Socket resource).
 */
static struct platform_driver fjes_driver = {
	.driver = {
		.name = DRV_NAME,
	},
	.probe = fjes_probe,
	.remove_new = fjes_remove,
};
1474 
1475 static acpi_status
1476 acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
1477 				 void *context, void **return_value)
1478 {
1479 	struct acpi_device *device;
1480 	bool *found = context;
1481 
1482 	device = acpi_fetch_acpi_dev(obj_handle);
1483 	if (!device)
1484 		return AE_OK;
1485 
1486 	if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
1487 		return AE_OK;
1488 
1489 	if (!is_extended_socket_device(device))
1490 		return AE_OK;
1491 
1492 	if (acpi_check_extended_socket_status(device))
1493 		return AE_OK;
1494 
1495 	*found = true;
1496 	return AE_CTRL_TERMINATE;
1497 }
1498 
1499 /* fjes_init_module - Driver Registration Routine */
1500 static int __init fjes_init_module(void)
1501 {
1502 	bool found = false;
1503 	int result;
1504 
1505 	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
1506 			    acpi_find_extended_socket_device, NULL, &found,
1507 			    NULL);
1508 
1509 	if (!found)
1510 		return -ENODEV;
1511 
1512 	pr_info("%s - version %s - %s\n",
1513 		fjes_driver_string, fjes_driver_version, fjes_copyright);
1514 
1515 	fjes_dbg_init();
1516 
1517 	result = platform_driver_register(&fjes_driver);
1518 	if (result < 0) {
1519 		fjes_dbg_exit();
1520 		return result;
1521 	}
1522 
1523 	result = acpi_bus_register_driver(&fjes_acpi_driver);
1524 	if (result < 0)
1525 		goto fail_acpi_driver;
1526 
1527 	return 0;
1528 
1529 fail_acpi_driver:
1530 	platform_driver_unregister(&fjes_driver);
1531 	fjes_dbg_exit();
1532 	return result;
1533 }
1534 
/* Run fjes_init_module() at module load / boot. */
module_init(fjes_init_module);
1536 
1537 /* fjes_exit_module - Driver Exit Cleanup Routine */
/* fjes_exit_module - Driver Exit Cleanup Routine
 *
 * Unwinds fjes_init_module() in reverse order: ACPI driver, platform
 * driver, then debugfs.
 */
static void __exit fjes_exit_module(void)
{
	acpi_bus_unregister_driver(&fjes_acpi_driver);
	platform_driver_unregister(&fjes_driver);
	fjes_dbg_exit();
}
1544 
/* Run fjes_exit_module() at module unload. */
module_exit(fjes_exit_module);
1546