/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 *  Copyright (c) 2002-2005 Neterion, Inc.
 *  All rights reserved.
 *
 *  FileName :    xgehal-ring.c
 *
 *  Description:  Rx ring object implementation
 *
 *  Created:      10 May 2004
 */

#include "xgehal-ring.h"
#include "xgehal-device.h"

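/*
 * __hal_ring_item_dma_offset - Return the byte offset of an RxD block
 * within its owning memblock.  Compiled in only when streaming DMA
 * mappings have to be synced explicitly before the hardware reads them.
 */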
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
static ptrdiff_t
__hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh,
			   void *item)
{
	int memblock_idx;
	void *memblock;

	/* get owner memblock index */
	memblock_idx = __hal_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = __hal_mempool_memblock(mempoolh, memblock_idx);

	return (char*)item - (char*)memblock;
}
#endif

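/*
 * __hal_ring_item_dma_addr - Translate an RxD block's virtual address
 * into the DMA address to be programmed into the hardware, and return
 * the DMA handle of the owning memblock via dma_handle.
 */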
static dma_addr_t
__hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
		pci_dma_h *dma_handle)
{
	int memblock_idx;
	void *memblock;
	xge_hal_mempool_dma_t *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __hal_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = __hal_mempool_memblock(mempoolh, memblock_idx);

	/* get memblock DMA object by memblock index */
	memblock_dma_object =
		__hal_mempool_memblock_dma(mempoolh, memblock_idx);

	/* calculate offset in the memblock of this item */
	dma_item_offset = (char*)item - (char*)memblock;

	*dma_handle = memblock_dma_object->handle;

	return memblock_dma_object->addr + dma_item_offset;
}

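/*
 * __hal_ring_rxdblock_link - Chain the "from" RxD block to the "to" RxD
 * block by writing the DMA address of "to" into the next-block pointer
 * of "from", syncing the update to the device when required.
 */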
static void
__hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
			 xge_hal_ring_t *ring, int from, int to)
{
	xge_hal_ring_block_t *to_item, *from_item;
	dma_addr_t to_dma, from_dma;
	pci_dma_h to_dma_handle, from_dma_handle;

	/* get "from" RxD block */
	from_item = __hal_mempool_item(mempoolh, from);
	xge_assert(from_item);

	/* get "to" RxD block */
	to_item = __hal_mempool_item(mempoolh, to);
	xge_assert(to_item);

	/* return address of the beginning of previous RxD block */
	to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);

	/* set next pointer for this RxD block to point to the
	 * previous block's DMA start address */
	__hal_ring_block_next_pointer_set(from_item, to_dma);

	/* return "from" RxD block's DMA start address */
	from_dma =
		__hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle);

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	/* we must sync "from" RxD block, so hardware will see it */
	xge_os_dma_sync(ring->channel.pdev,
		      from_dma_handle,
		      from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
		      __hal_ring_item_dma_offset(mempoolh, from_item) +
					XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
		      sizeof(u64),
		      XGE_OS_DMA_DIR_TODEVICE);
#endif

	xge_debug_ring(XGE_TRACE, "block%d:0x%llx => block%d:0x%llx",
		from, (unsigned long long)from_dma, to,
		(unsigned long long)to_dma);
}

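/*
 * __hal_ring_mempool_item_alloc - Mempool callback invoked once for each
 * allocated RxD block.  Formats all RxDs within the block, records them
 * in reserved_rxds_arr (in reverse order of their placement within the
 * block), stores the owning memblock index in the block itself, and links
 * the block to its neighbours to form the circular RxD chain.
 */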
static xge_hal_status_e
__hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
			      void *memblock,
			      int memblock_index,
			      xge_hal_mempool_dma_t *dma_object,
			      void *item,
			      int index,
			      int is_last,
			      void *userdata)
{
	int i;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata;

	xge_assert(item);
	xge_assert(ring);

	/* format rxds array */
	for (i=ring->rxds_per_block-1; i>=0; i--) {
		void *rxdblock_priv;
		xge_hal_ring_rxd_priv_t *rxd_priv;
		xge_hal_ring_rxd_1_t *rxdp;
		int reserve_index = index * ring->rxds_per_block + i;
		int memblock_item_idx;

		ring->reserved_rxds_arr[reserve_index] = (char *)item +
				(ring->rxds_per_block - 1 - i) * ring->rxd_size;

		/* Note: memblock_item_idx is index of the item within
		 *       the memblock. For instance, in case of three RxD-blocks
		 *       per memblock this value can be 0, 1 or 2. */
		rxdblock_priv =
			__hal_mempool_item_priv(mempoolh, memblock_index, item,
						&memblock_item_idx);
		rxdp = (xge_hal_ring_rxd_1_t *)
			ring->reserved_rxds_arr[reserve_index];
		rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *)
			((char*)rxdblock_priv + ring->rxd_priv_size * i);

		/* pre-format per-RxD Ring's private */
		rxd_priv->dma_offset = (char*)rxdp - (char*)memblock;
		rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
		rxd_priv->dma_handle = dma_object->handle;
#ifdef XGE_DEBUG_ASSERT
		rxd_priv->dma_object = dma_object;
#endif

		/* pre-format Host_Control */
#if defined(XGE_HAL_USE_5B_MODE)
		if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
			xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp;
#if defined(XGE_OS_PLATFORM_64BIT)
			xge_assert(memblock_index <= 0xFFFF);
			xge_assert(i <= 0xFFFF);
			/* store memblock's index */
			rxdp_5->host_control = (u32)memblock_index << 16;
			/* store index of memblock's private */
			rxdp_5->host_control |= (u32)(memblock_item_idx *
						    ring->rxds_per_block + i);
#else
			/* 32-bit case */
			rxdp_5->host_control = (u32)rxd_priv;
#endif
		} else {
			/* 1b and 3b modes */
			rxdp->host_control = (u64)(ulong_t)rxd_priv;
		}
#else
		/* 1b and 3b modes */
		rxdp->host_control = (u64)(ulong_t)rxd_priv;
#endif
	}

	__hal_ring_block_memblock_idx_set(item, memblock_index);

	if (is_last) {
		/* link last one with first one */
		__hal_ring_rxdblock_link(mempoolh, ring, 0, index);
	}

	if (index > 0) {
		/* link this RxD block with previous one */
		__hal_ring_rxdblock_link(mempoolh, ring, index, index-1);
	}

	return XGE_HAL_OK;
}

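/*
 * __hal_ring_initial_replenish - Reserve every free RxD, let the ULD's
 * dtr_init callback initialize it, and post it to the hardware.  Called
 * at channel-open time; the reopen argument is passed through to the
 * ULD callback.
 */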
xge_hal_status_e
__hal_ring_initial_replenish(xge_hal_channel_t *channel,
			     xge_hal_channel_reopen_e reopen)
{
	xge_hal_dtr_h dtr;

	while (__hal_channel_dtr_count(channel) > 0) {
		xge_hal_status_e status;

		status = xge_hal_ring_dtr_reserve(channel, &dtr);
		xge_assert(status == XGE_HAL_OK);

		if (channel->dtr_init) {
			status = channel->dtr_init(channel,
					dtr, channel->reserve_length,
					channel->userdata,
					reopen);
			if (status != XGE_HAL_OK) {
				xge_hal_ring_dtr_free(channel, dtr);
				xge_hal_channel_abort(channel,
					XGE_HAL_CHANNEL_OC_NORMAL);
				return status;
			}
		}

		xge_hal_ring_dtr_post(channel, dtr);
	}

	return XGE_HAL_OK;
}

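/*
 * __hal_ring_open - Allocate and format the ring: size the RxDs for the
 * configured buffer mode, allocate the reserve array and the RxD-block
 * mempool, initialize the channel, and optionally perform the initial
 * replenish.  Every failure path closes the half-opened channel before
 * returning.
 */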
xge_hal_status_e
__hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
	xge_hal_status_e status;
	xge_hal_device_t *hldev;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;

	/* Note: at this point we have channel.devh and channel.pdev
	 *       pre-set only! */

	hldev = (xge_hal_device_t *)ring->channel.devh;
	ring->config = &hldev->config.ring;
	queue = &ring->config->queue[attr->post_qid];
	ring->indicate_max_pkts = queue->indicate_max_pkts;
	ring->buffer_mode = queue->buffer_mode;

	xge_assert(queue->configured);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_init(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif

	ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
	ring->rxd_priv_size =
		sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space;

	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block = XGE_HAL_RING_RXDS_PER_BLOCK(queue->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

	ring->reserved_rxds_arr = xge_os_malloc(ring->channel.pdev,
		      sizeof(void*) * queue->max * ring->rxds_per_block);
	if (ring->reserved_rxds_arr == NULL) {
		__hal_ring_close(channelh);
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	ring->mempool = __hal_mempool_create(
				     hldev->pdev,
				     ring->config->memblock_size,
				     XGE_HAL_RING_RXDBLOCK_SIZE,
				     ring->rxdblock_priv_size,
				     queue->initial, queue->max,
				     __hal_ring_mempool_item_alloc,
				     NULL, /* nothing to free */
				     ring);
	if (ring->mempool == NULL) {
		__hal_ring_close(channelh);
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	status = __hal_channel_initialize(channelh,
					  attr,
					  ring->reserved_rxds_arr,
					  queue->initial * ring->rxds_per_block,
					  queue->max * ring->rxds_per_block,
					  0 /* no threshold for ring! */);
	if (status != XGE_HAL_OK) {
		__hal_ring_close(channelh);
		return status;
	}

	/* sanity check that everything formatted ok */
	xge_assert(ring->reserved_rxds_arr[0] ==
		    (char *)ring->mempool->items_arr[0] +
		      (ring->rxds_per_block * ring->rxd_size - ring->rxd_size));

	/* Note:
	 * Specifying dtr_init callback means two things:
	 * 1) dtrs need to be initialized by ULD at channel-open time;
	 * 2) dtrs need to be posted at channel-open time
	 *    (that's what the initial_replenish() below does)
	 * Currently we don't have a case when the 1) is done without the 2).
	 */
	if (ring->channel.dtr_init) {
		if ((status = __hal_ring_initial_replenish(channelh,
						XGE_HAL_CHANNEL_OC_NORMAL))
							!= XGE_HAL_OK) {
			__hal_ring_close(channelh);
			return status;
		}
	}

	return XGE_HAL_OK;
}

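/*
 * __hal_ring_close - Undo __hal_ring_open.  Safe to call on a partially
 * opened channel: the mempool and the reserve array are freed only if
 * they were actually allocated.
 */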
void
__hal_ring_close(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;
#if defined(XGE_HAL_RX_MULTI_RESERVE)||defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)||\
    defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
#endif

	xge_assert(ring->channel.pdev);

	queue = &ring->config->queue[ring->channel.post_qid];

	if (ring->mempool) {
		__hal_mempool_destroy(ring->mempool);
	}

	if (ring->reserved_rxds_arr) {
		xge_os_free(ring->channel.pdev,
			  ring->reserved_rxds_arr,
			  sizeof(void*) * queue->max * ring->rxds_per_block);
	}

	__hal_channel_terminate(channelh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_destroy(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.reserve_lock, hldev->pdev);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif
}

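/*
 * __hal_ring_prc_enable - Program the PRC for this ring: point prc_rxd0_n
 * at the first RxD block, then set the buffer mode, RxD backoff interval,
 * no-snoop and RTH bits in prc_ctrl_n, and finally configure scatter mode
 * and VLAN stripping in rx_pa_cfg.
 */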
void
__hal_ring_prc_enable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;
	void *first_block;
	int block_num;
	xge_hal_ring_queue_t *queue;
	pci_dma_h dma_handle;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
			((xge_hal_device_t *)ring->channel.devh)->bar0;

	queue = &ring->config->queue[ring->channel.post_qid];
	xge_assert(queue->buffer_mode == 1 ||
		    queue->buffer_mode == 3 ||
		    queue->buffer_mode == 5);

	/* last block in fact becomes first. This is just the way it
	 * is filled up and linked by item_alloc() */

	block_num = queue->initial;
	first_block = __hal_mempool_item(ring->mempool, block_num - 1);
	val64 = __hal_ring_item_dma_addr(ring->mempool,
					 first_block, &dma_handle);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);

	xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x%llx initialized",
			ring->channel.post_qid, (unsigned long long)val64);

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
		ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
	    !queue->rth_en) {
		val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
	}
	val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;

	val64 |= vBIT((queue->buffer_mode >> 1),14,2);/* 1,3 or 5 => 0,1 or 2 */
	val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
	val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
		(hldev->config.pci_freq_mherz * queue->backoff_interval_us));

	/* Beware: no snoop by the bridge if (no_snoop_bits) */
	val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);

	/* Herc: always use group_reads */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
		val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);

	/* Configure Receive Protocol Assist */
	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
			ring->channel.regh0, &bar0->rx_pa_cfg);
	val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
	val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI | XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
	/* Clear the STRIP_VLAN_TAG bit and set it as configured by the upper layer */
	val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
	val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->rx_pa_cfg);

	xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
			ring->channel.post_qid, queue->buffer_mode);
}

void
__hal_ring_prc_disable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
			((xge_hal_device_t *)ring->channel.devh)->bar0;

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
			      ring->channel.regh0,
			      &bar0->prc_ctrl_n[ring->channel.post_qid]);
	val64 &= ~((u64) XGE_HAL_PRC_CTRL_RC_ENABLED);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
}

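/*
 * __hal_ring_hw_initialize - Device-wide Rx setup performed once per
 * device: program per-queue priorities and DRAM partitioning, activate
 * QoS steering, zero the frame-length registers of unconfigured queues,
 * and enable the MC-RLDRAM after the queue sizes have been set.
 */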
void
__hal_ring_hw_initialize(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	int i, j;

	/* Rx DMA initialization. */

	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (!hldev->config.ring.queue[i].configured)
			continue;
		val64 |= vBIT(hldev->config.ring.queue[i].priority,
							(5 + (i * 8)), 3);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			&bar0->rx_queue_priority);
	xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x%llx",
			(unsigned long long)val64);

	/* Configuring ring queues according to per-ring configuration */
	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (!hldev->config.ring.queue[i].configured)
			continue;
		val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,(i*8),8);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                     &bar0->rx_queue_cfg);
	xge_debug_ring(XGE_TRACE, "DRAM configured to 0x%llx",
			(unsigned long long)val64);

	/* Activate Rx steering */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->rts_qos_steering);
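	/*
	 * rts_qos_steering appears to hold one byte per QoS level (eight
	 * levels total), with one bit per ring within each byte.  Rings
	 * that do not use RTH are made eligible for every QoS level here.
	 */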
	for (j = 0; j < 8 /* QoS max */; j++) {
		for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
			if (!hldev->config.ring.queue[i].configured)
				continue;
			if (!hldev->config.ring.queue[i].rth_en)
				val64 |= (BIT(i) >> (j*8));
		}
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                     &bar0->rts_qos_steering);
	xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x%llx",
			(unsigned long long)val64);

	/* Note: If a queue does not exist, it should be assigned a maximum
	 *	 length of zero. Otherwise, packet loss could occur.
	 *	 P. 4-4 User guide.
	 *
	 * All configured rings will be properly set at device open time
	 * by utilizing device_mtu_set() API call. */
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (hldev->config.ring.queue[i].configured)
			continue;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
		                     &bar0->rts_frm_len_n[i]);
	}

#ifdef XGE_HAL_HERC_EMULATION
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
	val64 |= 0x0000000000010000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		((u8 *)bar0 + 0x2e60));

	val64 |= 0x003a000000000000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
	xge_os_mdelay(2000);
#endif

	/* now enabling MC-RLDRAM after setting MC_QUEUE sizes */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->mc_rldram_mrs);
	val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
		 XGE_HAL_MC_RLDRAM_MRS_ENABLE;
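	/*
	 * The 64-bit MRS value is written as two 32-bit halves, upper half
	 * first, with a write barrier between the two PIO writes; presumably
	 * the lower-half write is the one that commits the new setting.
	 */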
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
	                     &bar0->mc_rldram_mrs);
	xge_os_wmb();
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
	                     &bar0->mc_rldram_mrs);

	/* The RLDRAM initialization procedure requires 500us to complete */
	xge_os_mdelay(1);

	/* Temporary fixes for Herc RLDRAM */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
		val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		                     &bar0->mc_rldram_ref_per_herc);

		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		                            &bar0->mc_rldram_mrs_herc);
		xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x%llx",
			       (unsigned long long)val64);

		val64 = 0x0003570003010300ULL;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		                       &bar0->mc_rldram_mrs_herc);

		xge_os_mdelay(1);
	}
	xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
}

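/*
 * __hal_ring_mtu_set - Program the per-ring maximum frame length: use the
 * per-queue max_frm_len override when one is configured, otherwise the new
 * device-wide frame length; also update the RMAC maximum payload length.
 */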
void
__hal_ring_mtu_set(xge_hal_device_h devh, int new_frmlen)
{
	int i;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (!hldev->config.ring.queue[i].configured)
			continue;
		if (hldev->config.ring.queue[i].max_frm_len !=
						XGE_HAL_RING_USE_MTU) {
			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			        XGE_HAL_MAC_RTS_FRM_LEN_SET(
				hldev->config.ring.queue[i].max_frm_len),
				&bar0->rts_frm_len_n[i]);
		} else {
			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
				       XGE_HAL_MAC_RTS_FRM_LEN_SET(new_frmlen),
				       &bar0->rts_frm_len_n[i]);
		}
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			       XGE_HAL_RMAC_MAX_PYLD_LEN(new_frmlen),
			       &bar0->rmac_max_pyld_len);
}
609