xref: /linux/drivers/net/fjes/fjes_main.c (revision 4ce06406958b67fdddcc2e6948237dd6ff6ba112)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  FUJITSU Extended Socket Network Device driver
4  *  Copyright (c) 2015 FUJITSU LIMITED
5  */
6 
7 #include <linux/module.h>
8 #include <linux/types.h>
9 #include <linux/nls.h>
10 #include <linux/platform_device.h>
11 #include <linux/netdevice.h>
12 #include <linux/interrupt.h>
13 
14 #include "fjes.h"
15 #include "fjes_trace.h"
16 
17 #define DRV_VERSION "1.2"
18 #define DRV_NAME	"fjes"
19 char fjes_driver_name[] = DRV_NAME;
20 char fjes_driver_version[] = DRV_VERSION;
21 static const char fjes_driver_string[] =
22 		"FUJITSU Extended Socket Network Device Driver";
23 static const char fjes_copyright[] =
24 		"Copyright (c) 2015 FUJITSU LIMITED";
25 
26 MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
27 MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
28 MODULE_LICENSE("GPL");
29 MODULE_VERSION(DRV_VERSION);
30 
31 #define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
32 
/* Match the generic ACPI "motherboard resources" node (PNP0C02); whether a
 * matched node is actually the Extended Socket device is decided later by
 * inspecting its _STR object (see is_extended_socket_device()).
 */
static const struct acpi_device_id fjes_acpi_ids[] = {
	{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
38 
39 static bool is_extended_socket_device(struct acpi_device *device)
40 {
41 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
42 	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
43 	union acpi_object *str;
44 	acpi_status status;
45 	int result;
46 
47 	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
48 	if (ACPI_FAILURE(status))
49 		return false;
50 
51 	str = buffer.pointer;
52 	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
53 				 str->string.length, UTF16_LITTLE_ENDIAN,
54 				 str_buf, sizeof(str_buf) - 1);
55 	str_buf[result] = 0;
56 
57 	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
58 		kfree(buffer.pointer);
59 		return false;
60 	}
61 	kfree(buffer.pointer);
62 
63 	return true;
64 }
65 
66 static int acpi_check_extended_socket_status(struct acpi_device *device)
67 {
68 	unsigned long long sta;
69 	acpi_status status;
70 
71 	status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
72 	if (ACPI_FAILURE(status))
73 		return -ENODEV;
74 
75 	if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
76 	      (sta & ACPI_STA_DEVICE_ENABLED) &&
77 	      (sta & ACPI_STA_DEVICE_UI) &&
78 	      (sta & ACPI_STA_DEVICE_FUNCTIONING)))
79 		return -ENODEV;
80 
81 	return 0;
82 }
83 
84 static acpi_status
85 fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
86 {
87 	struct acpi_resource_address32 *addr;
88 	struct acpi_resource_irq *irq;
89 	struct resource *res = data;
90 
91 	switch (acpi_res->type) {
92 	case ACPI_RESOURCE_TYPE_ADDRESS32:
93 		addr = &acpi_res->data.address32;
94 		res[0].start = addr->address.minimum;
95 		res[0].end = addr->address.minimum +
96 			addr->address.address_length - 1;
97 		break;
98 
99 	case ACPI_RESOURCE_TYPE_IRQ:
100 		irq = &acpi_res->data.irq;
101 		if (irq->interrupt_count != 1)
102 			return AE_ERROR;
103 		res[1].start = irq->interrupts[0];
104 		res[1].end = irq->interrupts[0];
105 		break;
106 
107 	default:
108 		break;
109 	}
110 
111 	return AE_OK;
112 }
113 
/* Negotiate shared-memory resources with the peer EPs before the interface
 * can carry traffic:
 *  1) request zoning info from the device and cache each EP's
 *     es_status/zone,
 *  2) raise an INFO_UPDATE interrupt toward every enabled peer EP,
 *  3) after a settle delay, (re)initialize each peer's TX buffer and
 *     register the buffer pair with peers in the same zone.
 * Any command failure sets adapter->force_reset and is returned to the
 * caller (fjes_open), which then unwinds via fjes_free_resources().
 */
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int result;
	int epidx;

	/* hw_info.lock serializes device command request/response cycles. */
	mutex_lock(&hw->hw_info.lock);
	result = fjes_hw_request_info(hw);
	switch (result) {
	case 0:
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			hw->ep_shm_info[epidx].es_status =
			    hw->hw_info.res_buf->info.info[epidx].es_status;
			hw->ep_shm_info[epidx].zone =
			    hw->hw_info.res_buf->info.info[epidx].zone;
		}
		break;
	default:
	case -ENOMSG:
	case -EBUSY:
		/* Command failed: request a device reset on next teardown. */
		adapter->force_reset = true;

		mutex_unlock(&hw->hw_info.lock);
		return result;
	}
	mutex_unlock(&hw->hw_info.lock);

	/* Notify every enabled peer EP that our zoning info changed. */
	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if ((epidx != hw->my_epid) &&
		    (hw->ep_shm_info[epidx].es_status ==
		     FJES_ZONING_STATUS_ENABLE)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_INFO_UPDATE);
			hw->ep_shm_info[epidx].ep_stats
				.send_intr_zoneupdate += 1;
		}
	}

	/* Give the peers time to process the zone-update interrupts. */
	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if (epidx == hw->my_epid)
			continue;

		buf_pair = &hw->ep_shm_info[epidx];

		/* rx_status_lock protects the shared-buffer status words. */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
				    netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
			mutex_lock(&hw->hw_info.lock);
			result =
			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
			mutex_unlock(&hw->hw_info.lock);

			switch (result) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				adapter->force_reset = true;
				return result;
			}

			hw->ep_shm_info[epidx].ep_stats
				.com_regist_buf_exec += 1;
		}
	}

	return 0;
}
191 
192 static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
193 {
194 	struct fjes_hw *hw = &adapter->hw;
195 
196 	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
197 
198 	adapter->unset_rx_last = true;
199 	napi_schedule(&adapter->napi);
200 }
201 
/* DEV_STOP_REQ interrupt: peer EP src_epid asked us to stop sharing.
 * Depending on our view of the partner state: acknowledge the stop request
 * and arm the unshare watcher (EP_PARTNER_WAITING, falling through),
 * arm the unshare watcher alone (unshared/complete/unknown), or — while
 * still fully shared — schedule the epstop worker instead.
 */
static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	/* Reserve this EP for buffer unsharing before inspecting state. */
	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_WAITING:
		/* Acknowledge the peer's stop request in the shared status
		 * word, then fall through to arm the unshare watcher.
		 */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		clear_bit(src_epid, &hw->txrx_stop_req_bit);
		fallthrough;
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		set_bit(src_epid, &adapter->unshare_watch_bitmask);
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
		break;
	case EP_PARTNER_SHARED:
		set_bit(src_epid, &hw->epstop_req_bit);

		if (!work_pending(&hw->epstop_task))
			queue_work(adapter->control_wq, &hw->epstop_task);
		break;
	}
	trace_fjes_stop_req_irq_post(hw, src_epid);
}
237 
/* TXRX_STOP_REQ interrupt: peer EP src_epid wants to stop TX/RX with us.
 * A waiting partner with a lower EP id gets its stop request acknowledged
 * and handed to the unshare watcher; a fully shared partner that has a
 * stop request pending in its RX status schedules the epstop worker.
 * Unshared/complete partners require no action.
 */
static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
				   int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		break;
	case EP_PARTNER_WAITING:
		/* Only the lower-numbered side acknowledges here — the tie
		 * break avoids both sides acting on the same request.
		 */
		if (src_epid < hw->my_epid) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(src_epid, &hw->txrx_stop_req_bit);
			set_bit(src_epid, &adapter->unshare_watch_bitmask);

			if (!work_pending(&adapter->unshare_watch_task))
				queue_work(adapter->control_wq,
					   &adapter->unshare_watch_task);
		}
		break;
	case EP_PARTNER_SHARED:
		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
		    FJES_RX_STOP_REQ_REQUEST) {
			set_bit(src_epid, &hw->epstop_req_bit);
			if (!work_pending(&hw->epstop_task))
				queue_work(adapter->control_wq,
					   &hw->epstop_task);
		}
		break;
	}
	trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
}
279 
280 static void fjes_update_zone_irq(struct fjes_adapter *adapter,
281 				 int src_epid)
282 {
283 	struct fjes_hw *hw = &adapter->hw;
284 
285 	if (!work_pending(&hw->update_zone_task))
286 		queue_work(adapter->control_wq, &hw->update_zone_task);
287 }
288 
289 static irqreturn_t fjes_intr(int irq, void *data)
290 {
291 	struct fjes_adapter *adapter = data;
292 	struct fjes_hw *hw = &adapter->hw;
293 	irqreturn_t ret;
294 	u32 icr;
295 
296 	icr = fjes_hw_capture_interrupt_status(hw);
297 
298 	if (icr & REG_IS_MASK_IS_ASSERT) {
299 		if (icr & REG_ICTL_MASK_RX_DATA) {
300 			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
301 			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
302 				.recv_intr_rx += 1;
303 		}
304 
305 		if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
306 			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
307 			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
308 				.recv_intr_stop += 1;
309 		}
310 
311 		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
312 			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
313 			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
314 				.recv_intr_unshare += 1;
315 		}
316 
317 		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
318 			fjes_hw_set_irqmask(hw,
319 					    REG_ICTL_MASK_TXRX_STOP_DONE, true);
320 
321 		if (icr & REG_ICTL_MASK_INFO_UPDATE) {
322 			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
323 			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
324 				.recv_intr_zoneupdate += 1;
325 		}
326 
327 		ret = IRQ_HANDLED;
328 	} else {
329 		ret = IRQ_NONE;
330 	}
331 
332 	return ret;
333 }
334 
/* Arm the interrupt watchdog and, if not yet done, register the shared
 * IRQ handler. Returns request_irq()'s result.
 *
 * NOTE(review): when irq_registered is already true this returns the
 * initial -1 even though nothing failed. Callers only reach this after
 * fjes_free_irq() cleared irq_registered, so the request_irq() path is
 * always taken in practice — confirm before adding new call sites that
 * rely on the return value.
 */
static int fjes_request_irq(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int result = -1;

	/* Periodic watcher that monitors interrupt delivery. */
	adapter->interrupt_watch_enable = true;
	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
		queue_delayed_work(adapter->control_wq,
				   &adapter->interrupt_watch_task,
				   FJES_IRQ_WATCH_DELAY);
	}

	if (!adapter->irq_registered) {
		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
				     IRQF_SHARED, netdev->name, adapter);
		if (result)
			adapter->irq_registered = false;
		else
			adapter->irq_registered = true;
	}

	return result;
}
358 
359 static void fjes_free_irq(struct fjes_adapter *adapter)
360 {
361 	struct fjes_hw *hw = &adapter->hw;
362 
363 	adapter->interrupt_watch_enable = false;
364 	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
365 
366 	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
367 
368 	if (adapter->irq_registered) {
369 		free_irq(adapter->hw.hw_res.irq, adapter);
370 		adapter->irq_registered = false;
371 	}
372 }
373 
/* Undo fjes_setup_resources(): unregister every peer buffer pair and
 * reinitialize the TX buffers. If any unregister failed — or a reset was
 * forced earlier — reset the device and reprogram its command registers
 * with the existing request/response/share buffer addresses.
 */
static void fjes_free_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	bool reset_flag = false;
	unsigned long flags;
	int result;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		/* hw_info.lock serializes device command cycles. */
		mutex_lock(&hw->hw_info.lock);
		result = fjes_hw_unregister_buff_addr(hw, epidx);
		mutex_unlock(&hw->hw_info.lock);

		hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;

		/* Any failed unregister forces a full device reset below. */
		if (result)
			reset_flag = true;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx,
				    netdev->dev_addr, netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		clear_bit(epidx, &hw->txrx_stop_req_bit);
	}

	if (reset_flag || adapter->force_reset) {
		result = fjes_hw_reset(hw);

		adapter->force_reset = false;

		/* A failed reset leaves the device unusable: block reopen. */
		if (result)
			adapter->open_guard = true;

		hw->hw_info.buffer_share_bit = 0;

		memset((void *)&param, 0, sizeof(param));

		/* Re-feed the device the physical buffer addresses. */
		param.req_len = hw->hw_info.req_buf_size;
		param.req_start = __pa(hw->hw_info.req_buf);
		param.res_len = hw->hw_info.res_buf_size;
		param.res_start = __pa(hw->hw_info.res_buf);
		param.share_start = __pa(hw->hw_info.share->ep_status);

		fjes_hw_init_command_registers(hw, &param);
	}
}
429 
/* fjes_open - Called when a network interface is made active.
 * Sets up the shared buffers, enables NAPI, requests the IRQ and unmasks
 * device interrupts before starting the TX queues. Fails with -ENXIO if a
 * previous hardware reset failed (open_guard); other failures unwind via
 * the error labels.
 */
static int fjes_open(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	int result;

	if (adapter->open_guard)
		return -ENXIO;

	result = fjes_setup_resources(adapter);
	if (result)
		goto err_setup_res;

	hw->txrx_stop_req_bit = 0;
	hw->epstop_req_bit = 0;

	napi_enable(&adapter->napi);

	/* Clear any stale interrupt causes before unmasking. */
	fjes_hw_capture_interrupt_status(hw);

	result = fjes_request_irq(adapter);
	if (result)
		goto err_req_irq;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);

	netif_tx_start_all_queues(netdev);
	netif_carrier_on(netdev);

	return 0;

err_req_irq:
	fjes_free_irq(adapter);
	napi_disable(&adapter->napi);

err_setup_res:
	fjes_free_resources(adapter);
	return result;
}
470 
/* fjes_close - Disables a network interface.
 * Stops the queues, signals epstop to the peers, clears our POLL_WORK bit
 * in every shared partner's status word, releases the IRQ, flushes all
 * pending work items, waits for the peers to stop and finally frees the
 * shared resources.
 */
static int fjes_close(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int epidx;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	fjes_hw_raise_epstop(hw);

	napi_disable(&adapter->napi);

	/* Tell shared peers we are no longer polling for RX. */
	spin_lock_irqsave(&hw->rx_status_lock, flags);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status &=
				~FJES_RX_POLL_WORK;
	}
	spin_unlock_irqrestore(&hw->rx_status_lock, flags);

	fjes_free_irq(adapter);

	/* Flush every worker that might still touch the shared buffers. */
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	adapter->unshare_watch_bitmask = 0;
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);

	fjes_hw_wait_epstop(hw);

	fjes_free_resources(adapter);

	return 0;
}
516 
517 static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
518 			void *data, size_t len)
519 {
520 	int retval;
521 
522 	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
523 					   data, len);
524 	if (retval)
525 		return retval;
526 
527 	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
528 		FJES_TX_DELAY_SEND_PENDING;
529 	if (!work_pending(&adapter->raise_intr_rxdata_task))
530 		queue_work(adapter->txrx_wq,
531 			   &adapter->raise_intr_rxdata_task);
532 
533 	retval = 0;
534 	return retval;
535 }
536 
537 static netdev_tx_t
538 fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
539 {
540 	struct fjes_adapter *adapter = netdev_priv(netdev);
541 	struct fjes_hw *hw = &adapter->hw;
542 
543 	int max_epid, my_epid, dest_epid;
544 	enum ep_partner_status pstatus;
545 	struct netdev_queue *cur_queue;
546 	char shortpkt[VLAN_ETH_HLEN];
547 	bool is_multi, vlan;
548 	struct ethhdr *eth;
549 	u16 queue_no = 0;
550 	u16 vlan_id = 0;
551 	netdev_tx_t ret;
552 	char *data;
553 	int len;
554 
555 	ret = NETDEV_TX_OK;
556 	is_multi = false;
557 	cur_queue = netdev_get_tx_queue(netdev, queue_no);
558 
559 	eth = (struct ethhdr *)skb->data;
560 	my_epid = hw->my_epid;
561 
562 	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;
563 
564 	data = skb->data;
565 	len = skb->len;
566 
567 	if (is_multicast_ether_addr(eth->h_dest)) {
568 		dest_epid = 0;
569 		max_epid = hw->max_epid;
570 		is_multi = true;
571 	} else if (is_local_ether_addr(eth->h_dest)) {
572 		dest_epid = eth->h_dest[ETH_ALEN - 1];
573 		max_epid = dest_epid + 1;
574 
575 		if ((eth->h_dest[0] == 0x02) &&
576 		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
577 			      eth->h_dest[3] | eth->h_dest[4])) &&
578 		    (dest_epid < hw->max_epid)) {
579 			;
580 		} else {
581 			dest_epid = 0;
582 			max_epid = 0;
583 			ret = NETDEV_TX_OK;
584 
585 			adapter->stats64.tx_packets += 1;
586 			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
587 			adapter->stats64.tx_bytes += len;
588 			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
589 		}
590 	} else {
591 		dest_epid = 0;
592 		max_epid = 0;
593 		ret = NETDEV_TX_OK;
594 
595 		adapter->stats64.tx_packets += 1;
596 		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
597 		adapter->stats64.tx_bytes += len;
598 		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
599 	}
600 
601 	for (; dest_epid < max_epid; dest_epid++) {
602 		if (my_epid == dest_epid)
603 			continue;
604 
605 		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
606 		if (pstatus != EP_PARTNER_SHARED) {
607 			if (!is_multi)
608 				hw->ep_shm_info[dest_epid].ep_stats
609 					.tx_dropped_not_shared += 1;
610 			ret = NETDEV_TX_OK;
611 		} else if (!fjes_hw_check_epbuf_version(
612 				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
613 			/* version is NOT 0 */
614 			adapter->stats64.tx_carrier_errors += 1;
615 			hw->ep_shm_info[dest_epid].net_stats
616 						.tx_carrier_errors += 1;
617 			hw->ep_shm_info[dest_epid].ep_stats
618 					.tx_dropped_ver_mismatch += 1;
619 
620 			ret = NETDEV_TX_OK;
621 		} else if (!fjes_hw_check_mtu(
622 				&adapter->hw.ep_shm_info[dest_epid].rx,
623 				netdev->mtu)) {
624 			adapter->stats64.tx_dropped += 1;
625 			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
626 			adapter->stats64.tx_errors += 1;
627 			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
628 			hw->ep_shm_info[dest_epid].ep_stats
629 					.tx_dropped_buf_size_mismatch += 1;
630 
631 			ret = NETDEV_TX_OK;
632 		} else if (vlan &&
633 			   !fjes_hw_check_vlan_id(
634 				&adapter->hw.ep_shm_info[dest_epid].rx,
635 				vlan_id)) {
636 			hw->ep_shm_info[dest_epid].ep_stats
637 				.tx_dropped_vlanid_mismatch += 1;
638 			ret = NETDEV_TX_OK;
639 		} else {
640 			if (len < VLAN_ETH_HLEN) {
641 				memset(shortpkt, 0, VLAN_ETH_HLEN);
642 				memcpy(shortpkt, skb->data, skb->len);
643 				len = VLAN_ETH_HLEN;
644 				data = shortpkt;
645 			}
646 
647 			if (adapter->tx_retry_count == 0) {
648 				adapter->tx_start_jiffies = jiffies;
649 				adapter->tx_retry_count = 1;
650 			} else {
651 				adapter->tx_retry_count++;
652 			}
653 
654 			if (fjes_tx_send(adapter, dest_epid, data, len)) {
655 				if (is_multi) {
656 					ret = NETDEV_TX_OK;
657 				} else if (
658 					   ((long)jiffies -
659 					    (long)adapter->tx_start_jiffies) >=
660 					    FJES_TX_RETRY_TIMEOUT) {
661 					adapter->stats64.tx_fifo_errors += 1;
662 					hw->ep_shm_info[dest_epid].net_stats
663 								.tx_fifo_errors += 1;
664 					adapter->stats64.tx_errors += 1;
665 					hw->ep_shm_info[dest_epid].net_stats
666 								.tx_errors += 1;
667 
668 					ret = NETDEV_TX_OK;
669 				} else {
670 					netif_trans_update(netdev);
671 					hw->ep_shm_info[dest_epid].ep_stats
672 						.tx_buffer_full += 1;
673 					netif_tx_stop_queue(cur_queue);
674 
675 					if (!work_pending(&adapter->tx_stall_task))
676 						queue_work(adapter->txrx_wq,
677 							   &adapter->tx_stall_task);
678 
679 					ret = NETDEV_TX_BUSY;
680 				}
681 			} else {
682 				if (!is_multi) {
683 					adapter->stats64.tx_packets += 1;
684 					hw->ep_shm_info[dest_epid].net_stats
685 								.tx_packets += 1;
686 					adapter->stats64.tx_bytes += len;
687 					hw->ep_shm_info[dest_epid].net_stats
688 								.tx_bytes += len;
689 				}
690 
691 				adapter->tx_retry_count = 0;
692 				ret = NETDEV_TX_OK;
693 			}
694 		}
695 	}
696 
697 	if (ret == NETDEV_TX_OK) {
698 		dev_kfree_skb(skb);
699 		if (is_multi) {
700 			adapter->stats64.tx_packets += 1;
701 			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
702 			adapter->stats64.tx_bytes += 1;
703 			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
704 		}
705 	}
706 
707 	return ret;
708 }
709 
710 static void
711 fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
712 {
713 	struct fjes_adapter *adapter = netdev_priv(netdev);
714 
715 	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
716 }
717 
/* ndo_change_mtu: round the requested MTU up to the next supported size
 * (returning -EINVAL when it exceeds them all, 0 immediately when it is
 * already in effect). When the interface is running, traffic is quiesced,
 * every peer TX buffer is rebuilt for the new MTU, and the MTU-change-done
 * flag is republished before traffic restarts.
 */
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	/* fjes_support_mtu[] is ascending and zero-terminated. */
	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		/* Announce the MTU change to the peers via the shared
		 * status words before stopping traffic.
		 */
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		msleep(1000);

		/* NOTE(review): the queues were already stopped above; this
		 * second call looks redundant — confirm before removing.
		 */
		netif_tx_stop_all_queues(netdev);
	}

	WRITE_ONCE(netdev->mtu, new_mtu);

	if (running) {
		/* Rebuild every peer TX buffer for the new MTU and signal
		 * completion through the shared status word.
		 */
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}
786 
/* ndo_tx_timeout handler: re-enable queue 0 so transmission is retried. */
static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
{
	netif_tx_wake_queue(netdev_get_tx_queue(netdev, 0));
}
793 
794 static int fjes_vlan_rx_add_vid(struct net_device *netdev,
795 				__be16 proto, u16 vid)
796 {
797 	struct fjes_adapter *adapter = netdev_priv(netdev);
798 	bool ret = true;
799 	int epid;
800 
801 	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
802 		if (epid == adapter->hw.my_epid)
803 			continue;
804 
805 		if (!fjes_hw_check_vlan_id(
806 			&adapter->hw.ep_shm_info[epid].tx, vid))
807 			ret = fjes_hw_set_vlan_id(
808 				&adapter->hw.ep_shm_info[epid].tx, vid);
809 	}
810 
811 	return ret ? 0 : -ENOSPC;
812 }
813 
814 static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
815 				 __be16 proto, u16 vid)
816 {
817 	struct fjes_adapter *adapter = netdev_priv(netdev);
818 	int epid;
819 
820 	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
821 		if (epid == adapter->hw.my_epid)
822 			continue;
823 
824 		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
825 	}
826 
827 	return 0;
828 }
829 
830 static const struct net_device_ops fjes_netdev_ops = {
831 	.ndo_open		= fjes_open,
832 	.ndo_stop		= fjes_close,
833 	.ndo_start_xmit		= fjes_xmit_frame,
834 	.ndo_get_stats64	= fjes_get_stats64,
835 	.ndo_change_mtu		= fjes_change_mtu,
836 	.ndo_tx_timeout		= fjes_tx_retry,
837 	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
838 	.ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
839 };
840 
841 /* fjes_netdev_setup - netdevice initialization routine */
842 static void fjes_netdev_setup(struct net_device *netdev)
843 {
844 	ether_setup(netdev);
845 
846 	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
847 	netdev->netdev_ops = &fjes_netdev_ops;
848 	fjes_set_ethtool_ops(netdev);
849 	netdev->mtu = fjes_support_mtu[3];
850 	netdev->min_mtu = fjes_support_mtu[0];
851 	netdev->max_mtu = fjes_support_mtu[3];
852 	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
853 }
854 
855 static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
856 				     int start_epid)
857 {
858 	struct fjes_hw *hw = &adapter->hw;
859 	enum ep_partner_status pstatus;
860 	int max_epid, cur_epid;
861 	int i;
862 
863 	max_epid = hw->max_epid;
864 	start_epid = (start_epid + 1 + max_epid) % max_epid;
865 
866 	for (i = 0; i < max_epid; i++) {
867 		cur_epid = (start_epid + i) % max_epid;
868 		if (cur_epid == hw->my_epid)
869 			continue;
870 
871 		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
872 		if (pstatus == EP_PARTNER_SHARED) {
873 			if (!fjes_hw_epbuf_rx_is_empty(
874 				&hw->ep_shm_info[cur_epid].rx))
875 				return cur_epid;
876 		}
877 	}
878 	return -1;
879 }
880 
881 static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
882 			      int *cur_epid)
883 {
884 	void *frame;
885 
886 	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
887 	if (*cur_epid < 0)
888 		return NULL;
889 
890 	frame =
891 	fjes_hw_epbuf_rx_curpkt_get_addr(
892 		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);
893 
894 	return frame;
895 }
896 
/* Drop the current (already consumed) packet from this EP's RX ring. */
static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
{
	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}
901 
/* NAPI poll handler: advertise FJES_RX_POLL_WORK to every shared peer,
 * drain pending frames from the shared RX rings (round-robin across EPs)
 * up to the budget, and — once idle for a few jiffies — clear POLL_WORK
 * and re-enable the RX-data interrupt.
 */
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	/* Tell shared peers we are actively polling, so they need not
	 * raise RX interrupts toward us.
	 */
	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				/* Allocation failure: count the drop but
				 * still release the shared-ring slot below.
				 */
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_errors += 1;
			} else {
				skb_put_data(skb, frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
								 .multicast += 1;
				}
			}

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		/* Keep polling for a short grace period after the last
		 * frame; only then fall back to interrupt-driven RX.
		 */
		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_schedule(napi);
		} else {
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}
1000 
1001 static int fjes_sw_init(struct fjes_adapter *adapter)
1002 {
1003 	struct net_device *netdev = adapter->netdev;
1004 
1005 	netif_napi_add(netdev, &adapter->napi, fjes_poll);
1006 
1007 	return 0;
1008 }
1009 
1010 static void fjes_force_close_task(struct work_struct *work)
1011 {
1012 	struct fjes_adapter *adapter = container_of(work,
1013 			struct fjes_adapter, force_close_task);
1014 	struct net_device *netdev = adapter->netdev;
1015 
1016 	rtnl_lock();
1017 	dev_close(netdev);
1018 	rtnl_unlock();
1019 }
1020 
/* Worker scheduled when a shared TX ring was full: wake the TX queue as
 * soon as every sendable peer's ring has room again (polled a few times),
 * or immediately once the stall has lasted beyond
 * FJES_TX_TX_STALL_TIMEOUT; otherwise sleep briefly and requeue itself.
 */
static void fjes_tx_stall_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, tx_stall_task);
	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;
	int all_queue_available, sendable;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;
	union ep_buffer_info *info;
	int i;

	/* Stalled too long already: just wake the queue and let xmit
	 * decide what to do with the rings.
	 */
	if (((long)jiffies -
		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
		netif_wake_queue(netdev);
		return;
	}

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (i = 0; i < 5; i++) {
		all_queue_available = 1;

		for (epid = 0; epid < max_epid; epid++) {
			if (my_epid == epid)
				continue;

			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
			sendable = (pstatus == EP_PARTNER_SHARED);
			if (!sendable)
				continue;

			info = adapter->hw.ep_shm_info[epid].tx.info;

			/* Peer is mid MTU change: bail out without waking. */
			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
				return;

			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
					 info->v1i.count_max)) {
				all_queue_available = 0;
				break;
			}
		}

		if (all_queue_available) {
			netif_wake_queue(netdev);
			return;
		}
	}

	/* Some ring is still full: back off briefly and try again. */
	usleep_range(50, 100);

	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}
1076 
/* Worker scheduled by fjes_tx_send(): for each shared peer with a delayed
 * send pending, clear the pending mark and — unless the peer is already
 * polling (FJES_RX_POLL_WORK set) — raise an RX-data interrupt toward it.
 * The snapshot pass and the interrupt pass are separate so the status is
 * latched once per peer.
 */
static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, raise_intr_rxdata_task);
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (epid = 0; epid < max_epid; epid++)
		hw->ep_shm_info[epid].tx_status_work = 0;

	/* Pass 1: snapshot each shared peer's tx_status and clear the
	 * pending flag in the shared buffer.
	 */
	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if (pstatus == EP_PARTNER_SHARED) {
			hw->ep_shm_info[epid].tx_status_work =
				hw->ep_shm_info[epid].tx.info->v1i.tx_status;

			if (hw->ep_shm_info[epid].tx_status_work ==
				FJES_TX_DELAY_SEND_PENDING) {
				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
					FJES_TX_DELAY_SEND_NONE;
			}
		}
	}

	/* Pass 2: interrupt each peer that had a send pending and is not
	 * already polling its RX ring.
	 */
	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if ((hw->ep_shm_info[epid].tx_status_work ==
		     FJES_TX_DELAY_SEND_PENDING) &&
		    (pstatus == EP_PARTNER_SHARED) &&
		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
		      FJES_RX_POLL_WORK)) {
			fjes_hw_raise_interrupt(hw, epid,
						REG_ICTL_MASK_RX_DATA);
			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
		}
	}

	usleep_range(500, 1000);
}
1126 
1127 static void fjes_watch_unshare_task(struct work_struct *work)
1128 {
1129 	struct fjes_adapter *adapter =
1130 	container_of(work, struct fjes_adapter, unshare_watch_task);
1131 
1132 	struct net_device *netdev = adapter->netdev;
1133 	struct fjes_hw *hw = &adapter->hw;
1134 
1135 	int unshare_watch, unshare_reserve;
1136 	int max_epid, my_epid, epidx;
1137 	int stop_req, stop_req_done;
1138 	ulong unshare_watch_bitmask;
1139 	unsigned long flags;
1140 	int wait_time = 0;
1141 	int is_shared;
1142 	int ret;
1143 
1144 	my_epid = hw->my_epid;
1145 	max_epid = hw->max_epid;
1146 
1147 	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
1148 	adapter->unshare_watch_bitmask = 0;
1149 
1150 	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
1151 	       (wait_time < 3000)) {
1152 		for (epidx = 0; epidx < max_epid; epidx++) {
1153 			if (epidx == my_epid)
1154 				continue;
1155 
1156 			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
1157 							   epidx);
1158 
1159 			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
1160 
1161 			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
1162 					FJES_RX_STOP_REQ_DONE;
1163 
1164 			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
1165 
1166 			unshare_reserve = test_bit(epidx,
1167 						   &hw->hw_info.buffer_unshare_reserve_bit);
1168 
1169 			if ((!stop_req ||
1170 			     (is_shared && (!is_shared || !stop_req_done))) &&
1171 			    (is_shared || !unshare_watch || !unshare_reserve))
1172 				continue;
1173 
1174 			mutex_lock(&hw->hw_info.lock);
1175 			ret = fjes_hw_unregister_buff_addr(hw, epidx);
1176 			switch (ret) {
1177 			case 0:
1178 				break;
1179 			case -ENOMSG:
1180 			case -EBUSY:
1181 			default:
1182 				if (!work_pending(
1183 					&adapter->force_close_task)) {
1184 					adapter->force_reset = true;
1185 					schedule_work(
1186 						&adapter->force_close_task);
1187 				}
1188 				break;
1189 			}
1190 			mutex_unlock(&hw->hw_info.lock);
1191 			hw->ep_shm_info[epidx].ep_stats
1192 					.com_unregist_buf_exec += 1;
1193 
1194 			spin_lock_irqsave(&hw->rx_status_lock, flags);
1195 			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1196 					    netdev->dev_addr, netdev->mtu);
1197 			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1198 
1199 			clear_bit(epidx, &hw->txrx_stop_req_bit);
1200 			clear_bit(epidx, &unshare_watch_bitmask);
1201 			clear_bit(epidx,
1202 				  &hw->hw_info.buffer_unshare_reserve_bit);
1203 		}
1204 
1205 		msleep(100);
1206 		wait_time += 100;
1207 	}
1208 
1209 	if (hw->hw_info.buffer_unshare_reserve_bit) {
1210 		for (epidx = 0; epidx < max_epid; epidx++) {
1211 			if (epidx == my_epid)
1212 				continue;
1213 
1214 			if (test_bit(epidx,
1215 				     &hw->hw_info.buffer_unshare_reserve_bit)) {
1216 				mutex_lock(&hw->hw_info.lock);
1217 
1218 				ret = fjes_hw_unregister_buff_addr(hw, epidx);
1219 				switch (ret) {
1220 				case 0:
1221 					break;
1222 				case -ENOMSG:
1223 				case -EBUSY:
1224 				default:
1225 					if (!work_pending(
1226 						&adapter->force_close_task)) {
1227 						adapter->force_reset = true;
1228 						schedule_work(
1229 							&adapter->force_close_task);
1230 					}
1231 					break;
1232 				}
1233 				mutex_unlock(&hw->hw_info.lock);
1234 
1235 				hw->ep_shm_info[epidx].ep_stats
1236 					.com_unregist_buf_exec += 1;
1237 
1238 				spin_lock_irqsave(&hw->rx_status_lock, flags);
1239 				fjes_hw_setup_epbuf(
1240 					&hw->ep_shm_info[epidx].tx,
1241 					netdev->dev_addr, netdev->mtu);
1242 				spin_unlock_irqrestore(&hw->rx_status_lock,
1243 						       flags);
1244 
1245 				clear_bit(epidx, &hw->txrx_stop_req_bit);
1246 				clear_bit(epidx, &unshare_watch_bitmask);
1247 				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
1248 			}
1249 
1250 			if (test_bit(epidx, &unshare_watch_bitmask)) {
1251 				spin_lock_irqsave(&hw->rx_status_lock, flags);
1252 				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
1253 						~FJES_RX_STOP_REQ_DONE;
1254 				spin_unlock_irqrestore(&hw->rx_status_lock,
1255 						       flags);
1256 			}
1257 		}
1258 	}
1259 }
1260 
1261 static void fjes_irq_watch_task(struct work_struct *work)
1262 {
1263 	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
1264 			struct fjes_adapter, interrupt_watch_task);
1265 
1266 	local_irq_disable();
1267 	fjes_intr(adapter->hw.hw_res.irq, adapter);
1268 	local_irq_enable();
1269 
1270 	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
1271 		napi_schedule(&adapter->napi);
1272 
1273 	if (adapter->interrupt_watch_enable) {
1274 		if (!delayed_work_pending(&adapter->interrupt_watch_task))
1275 			queue_delayed_work(adapter->control_wq,
1276 					   &adapter->interrupt_watch_task,
1277 					   FJES_IRQ_WATCH_DELAY);
1278 	}
1279 }
1280 
/* fjes_probe - Device Initialization Routine
 *
 * Allocates the multi-queue net_device, initializes the adapter private
 * data, workqueues and deferred work, maps the platform MEM/IRQ
 * resources into the hw struct, initializes the hardware and registers
 * the netdev.  Returns 0 on success or a negative errno, unwinding all
 * partial setup via the goto labels.
 */
static int fjes_probe(struct platform_device *plat_dev)
{
	struct fjes_adapter *adapter;
	struct net_device *netdev;
	struct resource *res;
	struct fjes_hw *hw;
	u8 addr[ETH_ALEN];
	int err;

	err = -ENOMEM;
	/* Netdev named "es%d"; its private area is the fjes_adapter. */
	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
				 NET_NAME_UNKNOWN, fjes_netdev_setup,
				 FJES_MAX_QUEUES);

	if (!netdev)
		goto err_out;

	SET_NETDEV_DEV(netdev, &plat_dev->dev);

	/* Wire up netdev <-> platform device <-> adapter back-pointers. */
	dev_set_drvdata(&plat_dev->dev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->plat_dev = plat_dev;
	hw = &adapter->hw;
	hw->back = adapter;

	/* setup the private structure */
	err = fjes_sw_init(adapter);
	if (err)
		goto err_free_netdev;

	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
	adapter->force_reset = false;
	adapter->open_guard = false;

	/* WQ_MEM_RECLAIM: tx/rx work may have to make forward progress
	 * under memory pressure.
	 */
	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx",
					   WQ_MEM_RECLAIM | WQ_PERCPU, 0);
	if (unlikely(!adapter->txrx_wq)) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
					      WQ_MEM_RECLAIM | WQ_PERCPU, 0);
	if (unlikely(!adapter->control_wq)) {
		err = -ENOMEM;
		goto err_free_txrx_wq;
	}

	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
	INIT_WORK(&adapter->raise_intr_rxdata_task,
		  fjes_raise_intr_rxdata_task);
	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
	adapter->unshare_watch_bitmask = 0;

	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
	adapter->interrupt_watch_enable = false;

	/* MEM resource 0 and IRQ 0 were supplied at platform-device
	 * creation time (see fjes_init_module).
	 */
	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -EINVAL;
		goto err_free_control_wq;
	}
	hw->hw_res.start = res->start;
	hw->hw_res.size = resource_size(res);
	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
	if (hw->hw_res.irq < 0) {
		err = hw->hw_res.irq;
		goto err_free_control_wq;
	}

	err = fjes_hw_init(&adapter->hw);
	if (err)
		goto err_free_control_wq;

	/* setup MAC address (02:00:00:00:00:[epid])*/
	addr[0] = 2;
	addr[1] = 0;
	addr[2] = 0;
	addr[3] = 0;
	addr[4] = 0;
	addr[5] = hw->my_epid; /* EPID */
	eth_hw_addr_set(netdev, addr);

	err = register_netdev(netdev);
	if (err)
		goto err_hw_exit;

	/* Carrier comes up later, once a partner EP is shared. */
	netif_carrier_off(netdev);

	fjes_dbg_adapter_init(adapter);

	return 0;

err_hw_exit:
	fjes_hw_exit(&adapter->hw);
err_free_control_wq:
	destroy_workqueue(adapter->control_wq);
err_free_txrx_wq:
	destroy_workqueue(adapter->txrx_wq);
err_free_netdev:
	free_netdev(netdev);
err_out:
	return err;
}
1387 
/* fjes_remove - Device Removal Routine
 *
 * Tears down everything fjes_probe() set up: stops all deferred work,
 * destroys the workqueues, unregisters the netdev, shuts down the
 * hardware and frees the netdev.
 */
static void fjes_remove(struct platform_device *plat_dev)
{
	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	fjes_dbg_adapter_exit(adapter);

	/* Synchronously cancel all outstanding work before destroying
	 * the workqueues it may be queued on.
	 */
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);
	if (adapter->control_wq)
		destroy_workqueue(adapter->control_wq);
	if (adapter->txrx_wq)
		destroy_workqueue(adapter->txrx_wq);

	unregister_netdev(netdev);

	fjes_hw_exit(hw);

	netif_napi_del(&adapter->napi);

	free_netdev(netdev);
}
1414 
/* Platform driver bound to the device created in fjes_init_module(). */
static struct platform_driver fjes_driver = {
	.driver = {
		.name = DRV_NAME,
	},
	.probe = fjes_probe,
	.remove = fjes_remove,
};
1422 
/* Context threaded through acpi_walk_namespace(): on success @adev holds
 * a referenced Extended Socket ACPI device and @resources the MEM/IRQ
 * resources parsed from its _CRS.
 */
struct fjes_acpi_walk_context {
	struct acpi_device *adev;
	struct resource resources[2];	/* [0] = MEM, [1] = IRQ */
};
1427 
1428 static acpi_status
1429 acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
1430 				 void *context, void **return_value)
1431 {
1432 	struct fjes_acpi_walk_context *fjes_context = context;
1433 	struct acpi_device *device;
1434 	acpi_status status;
1435 
1436 	device = acpi_get_acpi_dev(obj_handle);
1437 	if (!device)
1438 		return AE_OK;
1439 
1440 	if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
1441 		goto skip;
1442 
1443 	if (!is_extended_socket_device(device))
1444 		goto skip;
1445 
1446 	if (acpi_check_extended_socket_status(device))
1447 		goto skip;
1448 
1449 	status = acpi_walk_resources(obj_handle, METHOD_NAME__CRS,
1450 				     fjes_get_acpi_resource, fjes_context->resources);
1451 	if (ACPI_FAILURE(status))
1452 		goto skip;
1453 
1454 	fjes_context->adev = device;
1455 
1456 	return AE_CTRL_TERMINATE;
1457 
1458 skip:
1459 	acpi_dev_put(device);
1460 	return AE_OK;
1461 }
1462 
/* Platform device created in fjes_init_module(), removed on module exit. */
static struct platform_device *fjes_plat_dev;
1464 
/* fjes_init_module - Driver Registration Routine
 *
 * Walks the ACPI namespace for the Extended Socket device, creates a
 * platform device carrying its MEM/IRQ resources, then registers the
 * platform driver.  Returns 0 on success or a negative errno.
 */
static int __init fjes_init_module(void)
{
	/* Resource templates; overwritten by fjes_get_acpi_resource()
	 * during the namespace walk.
	 */
	struct fjes_acpi_walk_context fjes_context = {
		.adev = NULL,
		.resources = {
			DEFINE_RES_MEM(0, 1),
			DEFINE_RES_IRQ(0)
		}
	};
	struct platform_device_info pdevinfo;
	int result;

	/* On success the walk leaves a referenced device in .adev. */
	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
			    acpi_find_extended_socket_device, NULL, &fjes_context,
			    NULL);
	if (!fjes_context.adev)
		return -ENODEV;

	memset(&pdevinfo, 0, sizeof(pdevinfo));

	pdevinfo.name = DRV_NAME;
	pdevinfo.res = fjes_context.resources;
	pdevinfo.num_res = ARRAY_SIZE(fjes_context.resources);
	pdevinfo.fwnode = acpi_fwnode_handle(fjes_context.adev);

	fjes_plat_dev = platform_device_register_full(&pdevinfo);

	/* Drop the reference taken by the namespace walk. */
	acpi_dev_put(fjes_context.adev);

	if (IS_ERR(fjes_plat_dev))
		return PTR_ERR(fjes_plat_dev);

	pr_info("%s - version %s - %s\n",
		fjes_driver_string, fjes_driver_version, fjes_copyright);

	fjes_dbg_init();

	result = platform_driver_register(&fjes_driver);
	if (result < 0) {
		/* Undo debugfs + platform device on registration failure. */
		fjes_dbg_exit();
		platform_device_unregister(fjes_plat_dev);
		return result;
	}

	return 0;
}

module_init(fjes_init_module);
1514 
/* fjes_exit_module - Driver Exit Cleanup Routine
 *
 * Undoes fjes_init_module() in reverse order.
 */
static void __exit fjes_exit_module(void)
{
	platform_driver_unregister(&fjes_driver);
	fjes_dbg_exit();
	platform_device_unregister(fjes_plat_dev);
}

module_exit(fjes_exit_module);
1524