xref: /freebsd/sys/dev/ixl/ixl_pf_iflib.c (revision 1603881667360c015f6685131f2f25474fa67a72)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2020, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixl_pf.h"
36 
37 void
38 ixl_configure_tx_itr(struct ixl_pf *pf)
39 {
40 	struct i40e_hw		*hw = &pf->hw;
41 	struct ixl_vsi		*vsi = &pf->vsi;
42 	struct ixl_tx_queue	*que = vsi->tx_queues;
43 
44 	vsi->tx_itr_setting = pf->tx_itr;
45 
46 	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
47 		struct tx_ring	*txr = &que->txr;
48 
49 		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
50 		    vsi->tx_itr_setting);
51 		txr->itr = vsi->tx_itr_setting;
52 		txr->latency = IXL_AVE_LATENCY;
53 	}
54 }
55 
56 void
57 ixl_configure_rx_itr(struct ixl_pf *pf)
58 {
59 	struct i40e_hw		*hw = &pf->hw;
60 	struct ixl_vsi		*vsi = &pf->vsi;
61 	struct ixl_rx_queue	*que = vsi->rx_queues;
62 
63 	vsi->rx_itr_setting = pf->rx_itr;
64 
65 	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
66 		struct rx_ring 	*rxr = &que->rxr;
67 
68 		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
69 		    vsi->rx_itr_setting);
70 		rxr->itr = vsi->rx_itr_setting;
71 		rxr->latency = IXL_AVE_LATENCY;
72 	}
73 }
74 
75 int
76 ixl_intr(void *arg)
77 {
78 	struct ixl_pf		*pf = arg;
79 	struct i40e_hw		*hw = &pf->hw;
80 	struct ixl_vsi		*vsi = &pf->vsi;
81 	struct ixl_rx_queue	*que = vsi->rx_queues;
82 	u32			icr0;
83 
84 	++que->irqs;
85 
86 	/* Clear PBA at start of ISR if using legacy interrupts */
87 	if (vsi->shared->isc_intr == IFLIB_INTR_LEGACY)
88 		wr32(hw, I40E_PFINT_DYN_CTL0,
89 		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
90 		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
91 
92 	icr0 = rd32(hw, I40E_PFINT_ICR0);
93 
95 #ifdef PCI_IOV
96 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
97 		iflib_iov_intr_deferred(vsi->ctx);
98 #endif
99 
100 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
101 		iflib_admin_intr_deferred(vsi->ctx);
102 
103 	ixl_enable_intr0(hw);
104 
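	/* Only defer to the queue task if queue 0 actually raised the interrupt */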
105 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
106 		return (FILTER_SCHEDULE_THREAD);
107 	else
108 		return (FILTER_HANDLED);
109 }
110 
111 /*********************************************************************
112  *
113  *  MSI-X VSI Interrupt Service routine
114  *
115  **********************************************************************/
116 int
117 ixl_msix_que(void *arg)
118 {
119 	struct ixl_rx_queue *rx_que = arg;
120 
121 	++rx_que->irqs;
122 
123 	ixl_set_queue_rx_itr(rx_que);
124 
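	/*
	 * Always hand off to the iflib queue task; descriptor processing
	 * happens in task context rather than in this filter routine.
	 */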
125 	return (FILTER_SCHEDULE_THREAD);
126 }
127 
128 /*********************************************************************
129  *
130  *  MSI-X Admin Queue Interrupt Service routine
131  *
132  **********************************************************************/
133 int
134 ixl_msix_adminq(void *arg)
135 {
136 	struct ixl_pf	*pf = arg;
137 	struct i40e_hw	*hw = &pf->hw;
138 	device_t	dev = pf->dev;
139 	u32		reg, mask, rstat_reg;
140 	bool		do_task = FALSE;
141 
142 	DDPRINTF(dev, "begin");
143 
144 	++pf->admin_irq;
145 
146 	reg = rd32(hw, I40E_PFINT_ICR0);
147 	/*
148 	 * Used for masking off interrupt causes that need to be handled
149 	 * before they can be re-enabled
150 	 */
151 	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
152 
153 	/* Check on the cause */
154 	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
155 		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
156 		do_task = TRUE;
157 	}
158 
159 	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
160 		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
161 		atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
162 		do_task = TRUE;
163 	}
164 
165 	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
166 		const char *reset_type;
167 		mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
168 		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
169 		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
170 		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
171 		switch (rstat_reg) {
172 		/* These others might be handled similarly to an EMPR reset */
173 		case I40E_RESET_CORER:
174 			reset_type = "CORER";
175 			break;
176 		case I40E_RESET_GLOBR:
177 			reset_type = "GLOBR";
178 			break;
179 		case I40E_RESET_EMPR:
180 			reset_type = "EMPR";
181 			break;
182 		default:
183 			reset_type = "POR";
184 			break;
185 		}
186 		device_printf(dev, "Reset Requested! (%s)\n", reset_type);
187 		/* overload admin queue task to check reset progress */
188 		atomic_set_int(&pf->state, IXL_PF_STATE_RESETTING);
189 		do_task = TRUE;
190 	}
191 
192 	/*
193 	 * PE / PCI / ECC exceptions are all handled in the same way:
194 	 * mask out these three causes, then request a PF reset
195 	 */
196 	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
197 		device_printf(dev, "ECC Error detected!\n");
198 	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
199 		device_printf(dev, "PCI Exception detected!\n");
200 	if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
201 		device_printf(dev, "Critical Protocol Engine Error detected!\n");
202 	/* Request a PF reset if any of the critical errors above occurred */
203 	if (reg & IXL_ICR0_CRIT_ERR_MASK) {
204 		mask &= ~IXL_ICR0_CRIT_ERR_MASK;
205 		atomic_set_32(&pf->state,
206 		    IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
207 		do_task = TRUE;
208 	}
209 
210 	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
211 		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
212 		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
213 			device_printf(dev, "HMC Error detected!\n");
214 			device_printf(dev, "INFO 0x%08x\n", reg);
215 			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
216 			device_printf(dev, "DATA 0x%08x\n", reg);
217 			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
218 		}
219 	}
220 
221 #ifdef PCI_IOV
222 	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
223 		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
224 		iflib_iov_intr_deferred(pf->vsi.ctx);
225 	}
226 #endif
227 
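	/* Re-enable only the causes that do not need further servicing */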
228 	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
229 	ixl_enable_intr0(hw);
230 
231 	if (do_task)
232 		return (FILTER_SCHEDULE_THREAD);
233 	else
234 		return (FILTER_HANDLED);
235 }
236 
237 /*
238  * Configure queue interrupt cause registers in hardware.
239  *
240  * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
241  */
242 void
243 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
244 {
245 	struct i40e_hw *hw = &pf->hw;
246 	struct ixl_vsi *vsi = &pf->vsi;
247 	u32		reg;
248 	u16		vector = 1;
249 
250 	for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
251 		/* Make sure interrupt is disabled */
252 		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
253 		/* Set linked list head to point to corresponding RX queue
254 		 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
255 		reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
256 		        & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
257 		    ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
258 		        & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
259 		wr32(hw, I40E_PFINT_LNKLSTN(i), reg);
260 
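		/* RX cause: fire this vector, then chain to this pair's TX queue */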
261 		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
262 		    (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
263 		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
264 		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
265 		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
266 		wr32(hw, I40E_QINT_RQCTL(i), reg);
267 
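		/* TX cause: same vector; this entry terminates the linked list */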
268 		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
269 		    (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
270 		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
271 		    (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
272 		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
273 		wr32(hw, I40E_QINT_TQCTL(i), reg);
274 	}
275 }
276 
277 /*
278  * Configure for single interrupt vector operation
279  */
280 void
281 ixl_configure_legacy(struct ixl_pf *pf)
282 {
283 	struct i40e_hw	*hw = &pf->hw;
284 	struct ixl_vsi	*vsi = &pf->vsi;
285 	u32 reg;
286 
287 	vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;
288 
289 	/* Setup "other" causes */
290 	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
291 	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
292 	    | I40E_PFINT_ICR0_ENA_GRST_MASK
293 	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
294 	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
295 	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
296 	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
297 	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
298 	    ;
299 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
300 
301 	/* No ITR for non-queue interrupts */
302 	wr32(hw, I40E_PFINT_STAT_CTL0,
303 	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
304 
305 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
306 	wr32(hw, I40E_PFINT_LNKLST0, 0);
307 
308 	/* Associate the queue pair to the vector and enable the q int */
309 	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
310 	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
311 	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
312 	wr32(hw, I40E_QINT_RQCTL(0), reg);
313 
314 	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
315 	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
316 	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
317 	wr32(hw, I40E_QINT_TQCTL(0), reg);
318 }
319 
320 void
321 ixl_free_pci_resources(struct ixl_pf *pf)
322 {
323 	struct ixl_vsi		*vsi = &pf->vsi;
324 	device_t		dev = iflib_get_dev(vsi->ctx);
325 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
326 
327 	/* We may get here before the queue structures are set up */
328 	if (rx_que == NULL)
329 		goto early;
330 
331 	/*
332 	**  Release all MSI-X VSI resources:
333 	*/
334 	iflib_irq_free(vsi->ctx, &vsi->irq);
335 
336 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
337 		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
338 early:
339 	if (pf->pci_mem != NULL)
340 		bus_release_resource(dev, SYS_RES_MEMORY,
341 		    rman_get_rid(pf->pci_mem), pf->pci_mem);
342 }
343 
344 /*********************************************************************
345  *
346  *  Set up the networking device structure and register an interface.
347  *
348  **********************************************************************/
349 int
350 ixl_setup_interface(device_t dev, struct ixl_pf *pf)
351 {
352 	struct ixl_vsi *vsi = &pf->vsi;
353 	if_ctx_t ctx = vsi->ctx;
354 	struct i40e_hw *hw = &pf->hw;
355 	struct ifnet *ifp = iflib_get_ifp(ctx);
356 	struct i40e_aq_get_phy_abilities_resp abilities;
357 	enum i40e_status_code aq_error = 0;
358 
359 	INIT_DBG_DEV(dev, "begin");
360 
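	/* Largest frame: MTU plus Ethernet header, CRC, and one VLAN tag */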
361 	vsi->shared->isc_max_frame_size =
362 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
363 	    + ETHER_VLAN_ENCAP_LEN;
364 
365 	if (IXL_PF_IN_RECOVERY_MODE(pf))
366 		goto only_auto;
367 
368 	aq_error = i40e_aq_get_phy_capabilities(hw,
369 	    FALSE, TRUE, &abilities, NULL);
370 	/* May need delay to detect fiber correctly */
371 	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
372 		i40e_msec_delay(200);
373 		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
374 		    TRUE, &abilities, NULL);
375 	}
376 	if (aq_error) {
377 		if (aq_error == I40E_ERR_UNKNOWN_PHY)
378 			device_printf(dev, "Unknown PHY type detected!\n");
379 		else
380 			device_printf(dev,
381 			    "Error getting supported media types, err %d,"
382 			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
383 	} else {
384 		pf->supported_speeds = abilities.link_speed;
385 		if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
386 
387 		ixl_add_ifmedia(vsi->media, hw->phy.phy_types);
388 	}
389 
390 only_auto:
391 	/* Use autoselect media by default */
392 	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
393 	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
394 
395 	return (0);
396 }
397 
398 /*
399 ** Run when the Admin Queue gets a link state change interrupt.
400 */
401 void
402 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
403 {
404 	struct i40e_hw *hw = &pf->hw;
405 	device_t dev = iflib_get_dev(pf->vsi.ctx);
406 	struct i40e_aqc_get_link_status *status =
407 	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
408 
409 	/* Request link status from adapter */
410 	hw->phy.get_link_info = TRUE;
411 	i40e_get_link_status(hw, &pf->link_up);
412 
413 	/* Print out message if an unqualified module is found */
414 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
415 	    (pf->advertised_speed) &&
416 	    (atomic_load_32(&pf->state) &
417 	     IXL_PF_STATE_LINK_ACTIVE_ON_DOWN) != 0 &&
418 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
419 	    (!(status->link_info & I40E_AQ_LINK_UP)))
420 		device_printf(dev, "Link failed because "
421 		    "an unqualified module was detected!\n");
422 
423 	/* OS link info is updated elsewhere */
424 }
425 
426 /*********************************************************************
427  *
428  *  Initialize the VSI:  this handles contexts, which means things
429  *  			 like the number of descriptors and buffer size;
430  *			 we also initialize the rings through this function.
431  *
432  **********************************************************************/
433 int
434 ixl_initialize_vsi(struct ixl_vsi *vsi)
435 {
436 	struct ixl_pf *pf = vsi->back;
437 	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
438 	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
439 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
440 	device_t		dev = iflib_get_dev(vsi->ctx);
441 	struct i40e_hw		*hw = vsi->hw;
442 	struct i40e_vsi_context	ctxt;
443 	int 			tc_queues;
444 	int			err = 0;
445 
446 	memset(&ctxt, 0, sizeof(ctxt));
447 	ctxt.seid = vsi->seid;
448 	if (pf->veb_seid != 0)
449 		ctxt.uplink_seid = pf->veb_seid;
450 	ctxt.pf_num = hw->pf_id;
451 	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
452 	if (err) {
453 		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
454 		    " aq_error %d\n", err, hw->aq.asq_last_status);
455 		return (err);
456 	}
457 	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
458 	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
459 	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
460 	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
461 	    ctxt.uplink_seid, ctxt.vsi_number,
462 	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
463 	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
464 	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
465 	/*
466 	** Set the queue and traffic class bits
467 	**  - when multiple traffic classes are supported
468 	**    this will need to be more robust.
469 	*/
470 	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
471 	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
472 	/* In contig mode, que_mapping[0] is the first queue index used by this VSI */
473 	ctxt.info.queue_mapping[0] = 0;
474 	/*
475 	 * This VSI will only use traffic class 0; start traffic class 0's
476 	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
477 	 * the driver may not use all of them).
478 	 */
479 	tc_queues = fls(pf->qtag.num_allocated) - 1;
480 	ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
481 	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
482 	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
483 	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
484 
485 	/* Set VLAN receive stripping mode */
486 	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
487 	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
488 	if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
489 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
490 	else
491 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
492 
493 #ifdef IXL_IW
494 	/* Set TCP Enable for iWARP capable VSI */
495 	if (ixl_enable_iwarp && pf->iw_enabled) {
496 		ctxt.info.valid_sections |=
497 		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
498 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
499 	}
500 #endif
501 	/* Save VSI number and info for use later */
502 	vsi->vsi_num = ctxt.vsi_number;
503 	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
504 
505 	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
506 
507 	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
508 	if (err) {
509 		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
510 		    " aq_error %d\n", err, hw->aq.asq_last_status);
511 		return (err);
512 	}
513 
514 	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
515 		struct tx_ring		*txr = &tx_que->txr;
516 		struct i40e_hmc_obj_txq tctx;
517 		u32			txctl;
518 
519 		/* Setup the HMC TX Context  */
520 		bzero(&tctx, sizeof(tctx));
521 		tctx.new_context = 1;
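		/* HMC expects the ring base address scaled by IXL_TX_CTX_BASE_UNITS */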
522 		tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
523 		tctx.qlen = scctx->isc_ntxd[0];
524 		tctx.fc_ena = 0;	/* Disable FCoE */
525 		/*
526 		 * This value needs to be pulled from the VSI that this queue
527 		 * is assigned to; the index into the array is the traffic class.
528 		 */
529 		tctx.rdylist = vsi->info.qs_handle[0];
530 		/*
531 		 * Set these to enable Head Writeback
532 		 * - Address is last entry in TX ring (reserved for HWB index)
533 		 * Leave these as 0 for Descriptor Writeback
534 		 */
535 		if (vsi->enable_head_writeback) {
536 			tctx.head_wb_ena = 1;
537 			tctx.head_wb_addr = txr->tx_paddr +
538 			    (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
539 		} else {
540 			tctx.head_wb_ena = 0;
541 			tctx.head_wb_addr = 0;
542 		}
543 		tctx.rdylist_act = 0;
544 		err = i40e_clear_lan_tx_queue_context(hw, i);
545 		if (err) {
546 			device_printf(dev, "Unable to clear TX context\n");
547 			break;
548 		}
549 		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
550 		if (err) {
551 			device_printf(dev, "Unable to set TX context\n");
552 			break;
553 		}
554 		/* Associate the ring with this PF */
555 		txctl = I40E_QTX_CTL_PF_QUEUE;
556 		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
557 		    I40E_QTX_CTL_PF_INDX_MASK);
558 		wr32(hw, I40E_QTX_CTL(i), txctl);
559 		ixl_flush(hw);
560 
561 		/* Do ring (re)init */
562 		ixl_init_tx_ring(vsi, tx_que);
563 	}
564 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
565 		struct rx_ring 		*rxr = &rx_que->rxr;
566 		struct i40e_hmc_obj_rxq rctx;
567 
568 		/* Next setup the HMC RX Context  */
569 		rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
570 
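		/* Largest frame that fits in a full chain of RX buffers */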
571 		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
572 
573 		/* Set up an RX context for the HMC */
574 		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
575 		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
576 		/* ignore header split for now */
577 		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
578 		rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
579 		    scctx->isc_max_frame_size : max_rxmax;
580 		rctx.dtype = 0;
581 		rctx.dsize = 1;		/* use 32-byte descriptors */
582 		rctx.hsplit_0 = 0;	/* no header split */
583 		rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
584 		rctx.qlen = scctx->isc_nrxd[0];
585 		rctx.tphrdesc_ena = 1;
586 		rctx.tphwdesc_ena = 1;
587 		rctx.tphdata_ena = 0;	/* Header Split related */
588 		rctx.tphhead_ena = 0;	/* Header Split related */
589 		rctx.lrxqthresh = 1;	/* Interrupt at <64 desc avail */
590 		rctx.crcstrip = 1;
591 		rctx.l2tsel = 1;
592 		rctx.showiv = 1;	/* Strip inner VLAN header */
593 		rctx.fc_ena = 0;	/* Disable FCoE */
594 		rctx.prefena = 1;	/* Prefetch descriptors */
595 
596 		err = i40e_clear_lan_rx_queue_context(hw, i);
597 		if (err) {
598 			device_printf(dev,
599 			    "Unable to clear RX context %d\n", i);
600 			break;
601 		}
602 		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
603 		if (err) {
604 			device_printf(dev, "Unable to set RX context %d\n", i);
605 			break;
606 		}
607 		wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
608 	}
609 	return (err);
610 }
611 
612 
613 /*
614 ** Provide an update to the RX queue
615 ** interrupt moderation value.
616 */
617 void
618 ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
619 {
620 	struct ixl_vsi	*vsi = que->vsi;
621 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
622 	struct i40e_hw	*hw = vsi->hw;
623 	struct rx_ring	*rxr = &que->rxr;
624 	u16		rx_itr;
625 	u16		rx_latency = 0;
626 	int		rx_bytes;
627 
628 	/* Idle, do nothing */
629 	if (rxr->bytes == 0)
630 		return;
631 
632 	if (pf->dynamic_rx_itr) {
633 		rx_bytes = rxr->bytes/rxr->itr;
634 		rx_itr = rxr->itr;
635 
636 		/* Adjust latency range */
637 		switch (rxr->latency) {
638 		case IXL_LOW_LATENCY:
639 			if (rx_bytes > 10) {
640 				rx_latency = IXL_AVE_LATENCY;
641 				rx_itr = IXL_ITR_20K;
642 			}
643 			break;
644 		case IXL_AVE_LATENCY:
645 			if (rx_bytes > 20) {
646 				rx_latency = IXL_BULK_LATENCY;
647 				rx_itr = IXL_ITR_8K;
648 			} else if (rx_bytes <= 10) {
649 				rx_latency = IXL_LOW_LATENCY;
650 				rx_itr = IXL_ITR_100K;
651 			}
652 			break;
653 		case IXL_BULK_LATENCY:
654 			if (rx_bytes <= 20) {
655 				rx_latency = IXL_AVE_LATENCY;
656 				rx_itr = IXL_ITR_20K;
657 			}
658 			break;
659 		}
660 
661 		rxr->latency = rx_latency;
662 
663 		if (rx_itr != rxr->itr) {
664 			/* do an exponential smoothing */
665 			rx_itr = (10 * rx_itr * rxr->itr) /
666 			    ((9 * rx_itr) + rxr->itr);
667 			rxr->itr = min(rx_itr, IXL_MAX_ITR);
668 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
669 			    rxr->me), rxr->itr);
670 		}
671 	} else { /* We may have toggled to non-dynamic */
672 		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
673 			vsi->rx_itr_setting = pf->rx_itr;
674 		/* Update the hardware if needed */
675 		if (rxr->itr != vsi->rx_itr_setting) {
676 			rxr->itr = vsi->rx_itr_setting;
677 			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
678 			    rxr->me), rxr->itr);
679 		}
680 	}
681 	rxr->bytes = 0;
682 	rxr->packets = 0;
683 }
684 
685 
686 /*
687 ** Provide an update to the TX queue
688 ** interrupt moderation value.
689 */
690 void
691 ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
692 {
693 	struct ixl_vsi	*vsi = que->vsi;
694 	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
695 	struct i40e_hw	*hw = vsi->hw;
696 	struct tx_ring	*txr = &que->txr;
697 	u16		tx_itr;
698 	u16		tx_latency = 0;
699 	int		tx_bytes;
700 
702 	/* Idle, do nothing */
703 	if (txr->bytes == 0)
704 		return;
705 
706 	if (pf->dynamic_tx_itr) {
707 		tx_bytes = txr->bytes/txr->itr;
708 		tx_itr = txr->itr;
709 
710 		switch (txr->latency) {
711 		case IXL_LOW_LATENCY:
712 			if (tx_bytes > 10) {
713 				tx_latency = IXL_AVE_LATENCY;
714 				tx_itr = IXL_ITR_20K;
715 			}
716 			break;
717 		case IXL_AVE_LATENCY:
718 			if (tx_bytes > 20) {
719 				tx_latency = IXL_BULK_LATENCY;
720 				tx_itr = IXL_ITR_8K;
721 			} else if (tx_bytes <= 10) {
722 				tx_latency = IXL_LOW_LATENCY;
723 				tx_itr = IXL_ITR_100K;
724 			}
725 			break;
726 		case IXL_BULK_LATENCY:
727 			if (tx_bytes <= 20) {
728 				tx_latency = IXL_AVE_LATENCY;
729 				tx_itr = IXL_ITR_20K;
730 			}
731 			break;
732 		}
733 
734 		txr->latency = tx_latency;
735 
736 		if (tx_itr != txr->itr) {
737 			/* do an exponential smoothing */
738 			tx_itr = (10 * tx_itr * txr->itr) /
739 			    ((9 * tx_itr) + txr->itr);
740 			txr->itr = min(tx_itr, IXL_MAX_ITR);
741 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
742 			    txr->me), txr->itr);
743 		}
744 
745 	} else { /* We may have toggled to non-dynamic */
746 		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
747 			vsi->tx_itr_setting = pf->tx_itr;
748 		/* Update the hardware if needed */
749 		if (txr->itr != vsi->tx_itr_setting) {
750 			txr->itr = vsi->tx_itr_setting;
751 			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
752 			    txr->me), txr->itr);
753 		}
754 	}
755 	txr->bytes = 0;
756 	txr->packets = 0;
757 	return;
758 }
759 
760 #ifdef IXL_DEBUG
761 /**
762  * ixl_sysctl_qtx_tail_handler
763  * Retrieves I40E_QTX_TAIL value from hardware
764  * for a sysctl.
765  */
766 int
767 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
768 {
769 	struct ixl_tx_queue *tx_que;
770 	int error;
771 	u32 val;
772 
773 	tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
774 	if (!tx_que) return 0;
775 
776 	val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
777 	error = sysctl_handle_int(oidp, &val, 0, req);
778 	if (error || !req->newptr)
779 		return error;
780 	return (0);
781 }
782 
783 /**
784  * ixl_sysctl_qrx_tail_handler
785  * Retrieves I40E_QRX_TAIL value from hardware
786  * for a sysctl.
787  */
788 int
789 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
790 {
791 	struct ixl_rx_queue *rx_que;
792 	int error;
793 	u32 val;
794 
795 	rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
796 	if (!rx_que) return 0;
797 
798 	val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
799 	error = sysctl_handle_int(oidp, &val, 0, req);
800 	if (error || !req->newptr)
801 		return error;
802 	return (0);
803 }
804 #endif
805 
806 void
807 ixl_add_hw_stats(struct ixl_pf *pf)
808 {
809 	struct ixl_vsi *vsi = &pf->vsi;
810 	device_t dev = iflib_get_dev(vsi->ctx);
811 	struct i40e_hw_port_stats *pf_stats = &pf->stats;
812 
813 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
814 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
815 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
816 
817 	/* Driver statistics */
818 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
819 			CTLFLAG_RD, &pf->admin_irq,
820 			"Admin Queue IRQs received");
821 
822 	sysctl_ctx_init(&vsi->sysctl_ctx);
823 	ixl_vsi_add_sysctls(vsi, "pf", true);
824 
825 	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
826 }
827 
828 void
829 ixl_set_rss_hlut(struct ixl_pf *pf)
830 {
831 	struct i40e_hw	*hw = &pf->hw;
832 	struct ixl_vsi *vsi = &pf->vsi;
833 	device_t	dev = iflib_get_dev(vsi->ctx);
834 	int		i, que_id;
835 	int		lut_entry_width;
836 	u32		lut = 0;
837 	enum i40e_status_code status;
838 
839 	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
840 
841 	/* Populate the LUT with the RX queue indices in round-robin fashion */
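	/* Must hold at least func_caps.rss_table_size entries (filled below) */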
842 	u8 hlut_buf[512];
843 	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
844 #ifdef RSS
845 		/*
846 		 * Fetch the RSS bucket id for the given indirection entry.
847 		 * Cap it at the number of configured buckets (which is
848 		 * num_queues.)
849 		 */
850 		que_id = rss_get_indirection_to_bucket(i);
851 		que_id = que_id % vsi->num_rx_queues;
852 #else
853 		que_id = i % vsi->num_rx_queues;
854 #endif
855 		lut = (que_id & ((0x1 << lut_entry_width) - 1));
856 		hlut_buf[i] = lut;
857 	}
858 
859 	if (hw->mac.type == I40E_MAC_X722) {
860 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
861 		if (status)
862 			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
863 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
864 	} else {
865 		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
866 			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
867 		ixl_flush(hw);
868 	}
869 }
870 
871 /* For PF VSI only */
872 int
873 ixl_enable_rings(struct ixl_vsi *vsi)
874 {
875 	struct ixl_pf	*pf = vsi->back;
876 	int		error = 0;
877 
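	/* Note: only the status of the last ring enable is returned */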
878 	for (int i = 0; i < vsi->num_tx_queues; i++)
879 		error = ixl_enable_tx_ring(pf, &pf->qtag, i);
880 
881 	for (int i = 0; i < vsi->num_rx_queues; i++)
882 		error = ixl_enable_rx_ring(pf, &pf->qtag, i);
883 
884 	return (error);
885 }
886 
887 int
888 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
889 {
890 	int error = 0;
891 
892 	for (int i = 0; i < vsi->num_tx_queues; i++)
893 		error = ixl_disable_tx_ring(pf, qtag, i);
894 
895 	for (int i = 0; i < vsi->num_rx_queues; i++)
896 		error = ixl_disable_rx_ring(pf, qtag, i);
897 
898 	return (error);
899 }
900 
901 void
902 ixl_enable_intr(struct ixl_vsi *vsi)
903 {
904 	struct i40e_hw		*hw = vsi->hw;
905 	struct ixl_rx_queue	*que = vsi->rx_queues;
906 
907 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
908 		for (int i = 0; i < vsi->num_rx_queues; i++, que++)
909 			ixl_enable_queue(hw, que->rxr.me);
910 	} else
911 		ixl_enable_intr0(hw);
912 }
913 
914 void
915 ixl_disable_rings_intr(struct ixl_vsi *vsi)
916 {
917 	struct i40e_hw		*hw = vsi->hw;
918 	struct ixl_rx_queue	*que = vsi->rx_queues;
919 
920 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
921 		ixl_disable_queue(hw, que->rxr.me);
922 }
923 
924 int
925 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
926 {
927 	struct i40e_hw *hw = &pf->hw;
928 	device_t dev = pf->dev;
929 	int error = 0;
930 
931 	if (is_up)
932 		ixl_if_stop(pf->vsi.ctx);
933 
934 	ixl_shutdown_hmc(pf);
935 
936 	ixl_disable_intr0(hw);
937 
938 	error = i40e_shutdown_adminq(hw);
939 	if (error)
940 		device_printf(dev,
941 		    "Shutdown Admin queue failed with code %d\n", error);
942 
943 	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
944 	return (error);
945 }
946 
947 int
948 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
949 {
950 	struct i40e_hw *hw = &pf->hw;
951 	struct ixl_vsi *vsi = &pf->vsi;
952 	device_t dev = pf->dev;
953 	enum i40e_get_fw_lldp_status_resp lldp_status;
954 	int error = 0;
955 
956 	device_printf(dev, "Rebuilding driver state...\n");
957 
958 	/* Setup */
959 	error = i40e_init_adminq(hw);
960 	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
961 		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
962 		    error);
963 		goto ixl_rebuild_hw_structs_after_reset_err;
964 	}
965 
966 	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
967 		/* Keep admin queue interrupts active while driver is loaded */
968 		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
969 			ixl_configure_intr0_msix(pf);
970 			ixl_enable_intr0(hw);
971 		}
972 
973 		return (0);
974 	}
975 
976 	i40e_clear_pxe_mode(hw);
977 
978 	error = ixl_get_hw_capabilities(pf);
979 	if (error) {
980 		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
981 		goto ixl_rebuild_hw_structs_after_reset_err;
982 	}
983 
984 	error = ixl_setup_hmc(pf);
985 	if (error)
986 		goto ixl_rebuild_hw_structs_after_reset_err;
987 
988 	/* reserve a contiguous allocation for the PF's VSI */
989 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
990 	if (error) {
991 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
992 		    error);
993 	}
994 
995 	error = ixl_switch_config(pf);
996 	if (error) {
997 		device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
998 		     error);
999 		error = EIO;
1000 		goto ixl_rebuild_hw_structs_after_reset_err;
1001 	}
1002 
1003 	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
1004 	    NULL);
1005 	if (error) {
1006 		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
1007 		    " aq_err %d\n", error, hw->aq.asq_last_status);
1008 		error = EIO;
1009 		goto ixl_rebuild_hw_structs_after_reset_err;
1010 	}
1011 
1012 	u8 set_fc_err_mask;
1013 	error = i40e_set_fc(hw, &set_fc_err_mask, true);
1014 	if (error) {
1015 		device_printf(dev, "init: setting link flow control failed; retcode %d,"
1016 		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
1017 		error = EIO;
1018 		goto ixl_rebuild_hw_structs_after_reset_err;
1019 	}
1020 
1021 	/* Remove default filters reinstalled by FW on reset */
1022 	ixl_del_default_hw_filters(vsi);
1023 
1024 	/* Receive broadcast Ethernet frames */
1025 	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1026 
1027 	/* Determine link state */
1028 	if (ixl_attach_get_link_status(pf)) {
1029 		error = EINVAL;
1030 	}
1031 
1032 	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
1033 
1034 	/* Query device FW LLDP status */
1035 	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
1036 		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
1037 			atomic_set_32(&pf->state,
1038 			    IXL_PF_STATE_FW_LLDP_DISABLED);
1039 		} else {
1040 			atomic_clear_32(&pf->state,
1041 			    IXL_PF_STATE_FW_LLDP_DISABLED);
1042 		}
1043 	}
1044 
1045 	/* Keep admin queue interrupts active while driver is loaded */
1046 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1047 		ixl_configure_intr0_msix(pf);
1048 		ixl_enable_intr0(hw);
1049 	}
1050 
1051 	if (is_up) {
1052 		iflib_request_reset(vsi->ctx);
1053 		iflib_admin_intr_deferred(vsi->ctx);
1054 	}
1055 
1056 	device_printf(dev, "Rebuilding driver state done.\n");
1057 	return (0);
1058 
1059 ixl_rebuild_hw_structs_after_reset_err:
1060 	device_printf(dev, "Reload the driver to recover\n");
1061 	return (error);
1062 }
1063 
1064 /*
1065 ** Set flow control using sysctl:
1066 ** 	0 - off
1067 **	1 - rx pause
1068 **	2 - tx pause
1069 **	3 - full
1070 */
1071 int
1072 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
1073 {
1074 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1075 	struct i40e_hw *hw = &pf->hw;
1076 	device_t dev = pf->dev;
1077 	int requested_fc, error = 0;
1078 	enum i40e_status_code aq_error = 0;
1079 	u8 fc_aq_err = 0;
1080 
1081 	/* Get request */
1082 	requested_fc = pf->fc;
1083 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
1084 	if ((error) || (req->newptr == NULL))
1085 		return (error);
1086 	if (requested_fc < 0 || requested_fc > 3) {
1087 		device_printf(dev,
1088 		    "Invalid fc mode; valid modes are 0 through 3\n");
1089 		return (EINVAL);
1090 	}
1091 
1092 	/* Set fc ability for port */
1093 	hw->fc.requested_mode = requested_fc;
1094 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
1095 	if (aq_error) {
1096 		device_printf(dev,
1097 		    "%s: Error setting Flow Control mode %d; fc_err %#x\n",
1098 		    __func__, aq_error, fc_aq_err);
1099 		return (EIO);
1100 	}
1101 	pf->fc = requested_fc;
1102 
1103 	return (0);
1104 }
1105