/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */

#include "xgehal-ring.h"
#include "xgehal-device.h"

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
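/*
 * __hal_ring_item_dma_offset - Return the byte offset of the item (RxD block)
 * within its owner memory block.
 */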
static ptrdiff_t
__hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh,
			   void *item)
{
	int memblock_idx;
	void *memblock;

	/* get owner memblock index */
	memblock_idx = __hal_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = __hal_mempool_memblock(mempoolh, memblock_idx);

	return (char*)item - (char*)memblock;
}
#endif

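/*
 * __hal_ring_item_dma_addr - Return the DMA address of the item (RxD block)
 * and fill in the corresponding DMA handle.
 */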
static dma_addr_t
__hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
		pci_dma_h *dma_handle)
{
	int memblock_idx;
	void *memblock;
	xge_hal_mempool_dma_t *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __hal_ring_block_memblock_idx((xge_hal_ring_block_t *) item);

	/* get owner memblock by memblock index */
	memblock = __hal_mempool_memblock((xge_hal_mempool_t *) mempoolh,
					  memblock_idx);

	/* get memblock DMA object by memblock index */
	memblock_dma_object =
		__hal_mempool_memblock_dma((xge_hal_mempool_t *) mempoolh,
					   memblock_idx);

	/* calculate offset in the memblock of this item */
	dma_item_offset = (char*)item - (char*)memblock;

	*dma_handle = memblock_dma_object->handle;

	return memblock_dma_object->addr + dma_item_offset;
}

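/*
 * __hal_ring_rxdblock_link - Link the "from" RxD block to the "to" RxD block
 * by writing the DMA start address of "to" into the next-block pointer of
 * "from", and sync the update to the device if required by the platform.
 */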
static void
__hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
			 xge_hal_ring_t *ring, int from, int to)
{
	xge_hal_ring_block_t *to_item, *from_item;
	dma_addr_t to_dma, from_dma;
	pci_dma_h to_dma_handle, from_dma_handle;

	/* get "from" RxD block */
	from_item = (xge_hal_ring_block_t *)
		__hal_mempool_item((xge_hal_mempool_t *) mempoolh, from);
	xge_assert(from_item);

	/* get "to" RxD block */
	to_item = (xge_hal_ring_block_t *)
		__hal_mempool_item((xge_hal_mempool_t *) mempoolh, to);
	xge_assert(to_item);

	/* get DMA start address of the "to" RxD block */
	to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);

	/* set next pointer of the "from" RxD block to point to the
	 * "to" block's DMA start address */
	__hal_ring_block_next_pointer_set(from_item, to_dma);

	/* get "from" RxD block's DMA start address */
	from_dma =
		__hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle);

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	/* we must sync the "from" RxD block, so hardware will see it */
	xge_os_dma_sync(ring->channel.pdev,
			from_dma_handle,
			from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
			__hal_ring_item_dma_offset(mempoolh, from_item) +
				XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
			sizeof(u64),
			XGE_OS_DMA_DIR_TODEVICE);
#endif

	xge_debug_ring(XGE_TRACE, "block%d:0x"XGE_OS_LLXFMT" => block%d:0x"XGE_OS_LLXFMT,
		from, (unsigned long long)from_dma, to,
		(unsigned long long)to_dma);
}

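/*
 * __hal_ring_mempool_item_alloc - Mempool callback invoked for each allocated
 * RxD block: formats the RxDs within the block, pre-computes the per-RxD
 * private data (DMA offset, address and handle), stores the block's memblock
 * index, and links the block to its neighbours in the ring.
 */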
static xge_hal_status_e
__hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
			      void *memblock,
			      int memblock_index,
			      xge_hal_mempool_dma_t *dma_object,
			      void *item,
			      int index,
			      int is_last,
			      void *userdata)
{
	int i;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata;

	xge_assert(item);
	xge_assert(ring);


	/* format rxds array */
	for (i=ring->rxds_per_block-1; i>=0; i--) {
		void *rxdblock_priv;
		xge_hal_ring_rxd_priv_t *rxd_priv;
		xge_hal_ring_rxd_1_t *rxdp;
		int reserve_index = index * ring->rxds_per_block + i;
		int memblock_item_idx;

		ring->reserved_rxds_arr[reserve_index] = (char *)item +
				(ring->rxds_per_block - 1 - i) * ring->rxd_size;

		/* Note: memblock_item_idx is index of the item within
		 *       the memblock. For instance, in case of three RxD-blocks
		 *       per memblock this value can be 0, 1 or 2. */
		rxdblock_priv =
			__hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
						memblock_index, item,
						&memblock_item_idx);
		rxdp = (xge_hal_ring_rxd_1_t *)
			ring->reserved_rxds_arr[reserve_index];
		rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *)
			((char*)rxdblock_priv + ring->rxd_priv_size * i);

		/* pre-format per-RxD Ring's private */
		rxd_priv->dma_offset = (char*)rxdp - (char*)memblock;
		rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
		rxd_priv->dma_handle = dma_object->handle;
#ifdef XGE_DEBUG_ASSERT
		rxd_priv->dma_object = dma_object;
#endif

		/* pre-format Host_Control */
#if defined(XGE_HAL_USE_5B_MODE)
		if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
			xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp;
#if defined(XGE_OS_PLATFORM_64BIT)
			xge_assert(memblock_index <= 0xFFFF);
			xge_assert(i <= 0xFFFF);
			/* store memblock's index */
			rxdp_5->host_control = (u32)memblock_index << 16;
			/* store index of memblock's private */
			rxdp_5->host_control |= (u32)(memblock_item_idx *
						    ring->rxds_per_block + i);
#else
			/* 32-bit case */
			rxdp_5->host_control = (u32)rxd_priv;
#endif
		} else {
			/* 1b and 3b modes */
			rxdp->host_control = (u64)(ulong_t)rxd_priv;
		}
#else
		/* 1b and 3b modes */
		rxdp->host_control = (u64)(ulong_t)rxd_priv;
#endif
	}

	__hal_ring_block_memblock_idx_set((xge_hal_ring_block_t *) item, memblock_index);

	if (is_last) {
		/* link last one with first one */
		__hal_ring_rxdblock_link(mempoolh, ring, 0, index);
	}

	if (index > 0) {
		/* link this RxD block with previous one */
		__hal_ring_rxdblock_link(mempoolh, ring, index, index-1);
	}

	return XGE_HAL_OK;
}

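/*
 * __hal_ring_initial_replenish - Reserve, initialize (via the ULD-provided
 * dtr_init callback, if any) and post all available descriptors at
 * channel-open time.
 */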
xge_hal_status_e
__hal_ring_initial_replenish(xge_hal_channel_t *channel,
			     xge_hal_channel_reopen_e reopen)
{
	xge_hal_dtr_h dtr;

	while (xge_hal_channel_dtr_count(channel) > 0) {
		xge_hal_status_e status;

		status = xge_hal_ring_dtr_reserve(channel, &dtr);
		xge_assert(status == XGE_HAL_OK);

		if (channel->dtr_init) {
			status = channel->dtr_init(channel,
						dtr, channel->reserve_length,
						channel->userdata,
						reopen);
			if (status != XGE_HAL_OK) {
				xge_hal_ring_dtr_free(channel, dtr);
				xge_hal_channel_abort(channel,
					XGE_HAL_CHANNEL_OC_NORMAL);
				return status;
			}
		}

		xge_hal_ring_dtr_post(channel, dtr);
	}

	return XGE_HAL_OK;
}

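/*
 * __hal_ring_open - Open the ring channel: allocate the reserved RxDs array
 * and the RxD-block mempool, initialize the channel, and, if a dtr_init
 * callback was supplied, perform the initial replenish.
 */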
xge_hal_status_e
__hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
	xge_hal_status_e status;
	xge_hal_device_t *hldev;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;


	/* Note: at this point we have channel.devh and channel.pdev
	 *       pre-set only! */

	hldev = (xge_hal_device_t *)ring->channel.devh;
	ring->config = &hldev->config.ring;
	queue = &ring->config->queue[attr->post_qid];
	ring->indicate_max_pkts = queue->indicate_max_pkts;
	ring->buffer_mode = queue->buffer_mode;

	xge_assert(queue->configured);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_init(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif

	ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
	ring->rxd_priv_size =
		sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space;

	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block = XGE_HAL_RING_RXDS_PER_BLOCK(queue->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

	ring->reserved_rxds_arr = (void **) xge_os_malloc(ring->channel.pdev,
		      sizeof(void*) * queue->max * ring->rxds_per_block);

	if (ring->reserved_rxds_arr == NULL) {
		__hal_ring_close(channelh);
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	ring->mempool = __hal_mempool_create(
				     hldev->pdev,
				     ring->config->memblock_size,
				     XGE_HAL_RING_RXDBLOCK_SIZE,
				     ring->rxdblock_priv_size,
				     queue->initial, queue->max,
				     __hal_ring_mempool_item_alloc,
				     NULL, /* nothing to free */
				     ring);
	if (ring->mempool == NULL) {
		__hal_ring_close(channelh);
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	status = __hal_channel_initialize(channelh,
					  attr,
					  ring->reserved_rxds_arr,
					  queue->initial * ring->rxds_per_block,
					  queue->max * ring->rxds_per_block,
					  0 /* no threshold for ring! */);
	if (status != XGE_HAL_OK) {
		__hal_ring_close(channelh);
		return status;
	}

	/* sanity check that everything was formatted ok */
	xge_assert(ring->reserved_rxds_arr[0] ==
		    (char *)ring->mempool->items_arr[0] +
		      (ring->rxds_per_block * ring->rxd_size - ring->rxd_size));

	/* Note:
	 * Specifying the dtr_init callback means two things:
	 * 1) dtrs need to be initialized by the ULD at channel-open time;
	 * 2) dtrs need to be posted at channel-open time
	 *    (that's what __hal_ring_initial_replenish() below does).
	 * Currently there is no case where 1) is done without 2).
	 */
	if (ring->channel.dtr_init) {
		if ((status = __hal_ring_initial_replenish(
						(xge_hal_channel_t *) channelh,
						XGE_HAL_CHANNEL_OC_NORMAL))
						!= XGE_HAL_OK) {
			__hal_ring_close(channelh);
			return status;
		}
	}

	/* the initial replenish increments the usage counter in its post()
	 * routine, so we have to reset it here */
	ring->channel.usage_cnt = 0;

	return XGE_HAL_OK;
}

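/*
 * __hal_ring_close - Close the ring channel and free the resources allocated
 * by __hal_ring_open().
 */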
void
__hal_ring_close(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;
#if defined(XGE_HAL_RX_MULTI_RESERVE) || defined(XGE_HAL_RX_MULTI_RESERVE_IRQ) || \
    defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
#endif

	xge_assert(ring->channel.pdev);

	queue = &ring->config->queue[ring->channel.post_qid];

	if (ring->mempool) {
		__hal_mempool_destroy(ring->mempool);
	}

	if (ring->reserved_rxds_arr) {
		xge_os_free(ring->channel.pdev,
			  ring->reserved_rxds_arr,
			  sizeof(void*) * queue->max * ring->rxds_per_block);
	}

	__hal_channel_terminate(channelh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_destroy(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.reserve_lock, hldev->pdev);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif
}

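/*
 * __hal_ring_prc_enable - Program the PRC registers for this ring: the first
 * RxD block address, the PRC control word (buffer mode, RxD backoff interval,
 * no-snoop, Herc-specific bits) and the Receive Protocol Assist settings,
 * then enable receive on the ring.
 */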
void
__hal_ring_prc_enable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;
	void *first_block;
	int block_num;
	xge_hal_ring_queue_t *queue;
	pci_dma_h dma_handle;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
			((xge_hal_device_t *)ring->channel.devh)->bar0;

	queue = &ring->config->queue[ring->channel.post_qid];
	xge_assert(queue->buffer_mode == 1 ||
		    queue->buffer_mode == 3 ||
		    queue->buffer_mode == 5);

	/* The last block in fact becomes the first one. This is just the way
	 * the blocks are filled up and linked by item_alloc(). */

	block_num = queue->initial;
	first_block = __hal_mempool_item(ring->mempool, block_num - 1);
	val64 = __hal_ring_item_dma_addr(ring->mempool,
					 first_block, &dma_handle);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);

	xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x"XGE_OS_LLXFMT" initialized",
			ring->channel.post_qid, (unsigned long long)val64);

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
		ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
	    !queue->rth_en) {
		val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
	}
	val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;

	val64 |= vBIT((queue->buffer_mode >> 1), 14, 2); /* 1,3 or 5 => 0,1 or 2 */
	val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
	val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
		(hldev->config.pci_freq_mherz * queue->backoff_interval_us));

	/* Beware: no snoop by the bridge if (no_snoop_bits) */
	val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);

	/* Herc: always use group_reads */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
		val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;

	if (hldev->config.bimodal_interrupts)
		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
			val64 |= XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT;

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);

	/* Configure Receive Protocol Assist */
	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
			ring->channel.regh0, &bar0->rx_pa_cfg);
	val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
	val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI | XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
	/* Clear the STRIP_VLAN_TAG bit and set it as configured by the upper layer */
	val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
	val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->rx_pa_cfg);

	xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
			ring->channel.post_qid, queue->buffer_mode);
}

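/*
 * __hal_ring_prc_disable - Clear the RC_ENABLED bit in the PRC control
 * register of this ring, i.e. stop receive on the ring.
 */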
void
__hal_ring_prc_disable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
			((xge_hal_device_t *)ring->channel.devh)->bar0;

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
			ring->channel.regh0,
			&bar0->prc_ctrl_n[ring->channel.post_qid]);
	val64 &= ~((u64) XGE_HAL_PRC_CTRL_RC_ENABLED);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
}

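/*
 * __hal_ring_hw_initialize - Device-level Rx initialization: program ring
 * queue priorities, per-queue DRAM sizes, QoS steering, zero frame length
 * for unconfigured queues, and enable MC-RLDRAM.
 */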
void
__hal_ring_hw_initialize(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	int i, j;

	/* Rx DMA initialization. */

	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (!hldev->config.ring.queue[i].configured)
			continue;
		val64 |= vBIT(hldev->config.ring.queue[i].priority,
							(5 + (i * 8)), 3);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			&bar0->rx_queue_priority);
	xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x"XGE_OS_LLXFMT,
			(unsigned long long)val64);

	/* Configuring ring queues according to per-ring configuration */
	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (!hldev->config.ring.queue[i].configured)
			continue;
		val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,(i*8),8);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			&bar0->rx_queue_cfg);
	xge_debug_ring(XGE_TRACE, "DRAM configured to 0x"XGE_OS_LLXFMT,
			(unsigned long long)val64);

	if (!hldev->config.rts_qos_steering_config) {

		/* Activate Rx steering */
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
					      &bar0->rts_qos_steering);
		for (j = 0; j < 8 /* QoS max */; j++) {
			for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
				if (!hldev->config.ring.queue[i].configured)
					continue;
				if (!hldev->config.ring.queue[i].rth_en)
					val64 |= (BIT(i) >> (j*8));
			}
		}
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
				       &bar0->rts_qos_steering);
		xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x"XGE_OS_LLXFMT,
			       (unsigned long long)val64);
	}

	/* Note: If a queue does not exist, it should be assigned a maximum
	 *	 length of zero. Otherwise, packet loss could occur.
	 *	 P. 4-4 User guide.
	 *
	 * All configured rings will be properly set at device open time
	 * by utilizing the device_mtu_set() API call. */
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (hldev->config.ring.queue[i].configured)
			continue;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
				       &bar0->rts_frm_len_n[i]);
	}

#ifdef XGE_HAL_HERC_EMULATION
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
	val64 |= 0x0000000000010000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		((u8 *)bar0 + 0x2e60));

	val64 |= 0x003a000000000000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
	xge_os_mdelay(2000);
#endif

	/* now enabling MC-RLDRAM after setting MC_QUEUE sizes */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				      &bar0->mc_rldram_mrs);
	val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
		 XGE_HAL_MC_RLDRAM_MRS_ENABLE;
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
				    &bar0->mc_rldram_mrs);
	xge_os_wmb();
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
				    &bar0->mc_rldram_mrs);

	/* The RLDRAM initialization procedure requires 500us to complete */
	xge_os_mdelay(1);

	/* Temporary fixes for Herc RLDRAM */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
		val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
				       &bar0->mc_rldram_ref_per_herc);

		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
					      &bar0->mc_rldram_mrs_herc);
		xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT,
			       (unsigned long long)val64);

		val64 = 0x0003570003010300ULL;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
				       &bar0->mc_rldram_mrs_herc);

		xge_os_mdelay(1);
	}
	xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
}

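/*
 * __hal_ring_mtu_set - Program the per-ring maximum frame length (either the
 * per-queue max_frm_len or the new frame length) and the RMAC maximum
 * payload length.
 */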
void
__hal_ring_mtu_set(xge_hal_device_h devh, int new_frmlen)
{
	int i;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (!hldev->config.ring.queue[i].configured)
			continue;
		if (hldev->config.ring.queue[i].max_frm_len !=
						XGE_HAL_RING_USE_MTU) {
			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
				XGE_HAL_MAC_RTS_FRM_LEN_SET(
				hldev->config.ring.queue[i].max_frm_len),
				&bar0->rts_frm_len_n[i]);
		} else {
			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
				       XGE_HAL_MAC_RTS_FRM_LEN_SET(new_frmlen),
				       &bar0->rts_frm_len_n[i]);
		}
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			       XGE_HAL_RMAC_MAX_PYLD_LEN(new_frmlen),
			       &bar0->rmac_max_pyld_len);
}