/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */

#include "xgehal-ring.h"
#include "xgehal-device.h"

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
static ptrdiff_t
__hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh,
		void *item)
{
	int memblock_idx;
	void *memblock;

	/* get owner memblock index */
	memblock_idx = __hal_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = __hal_mempool_memblock(mempoolh, memblock_idx);

	return (char*)item - (char*)memblock;
}
#endif

static dma_addr_t
__hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
		pci_dma_h *dma_handle)
{
	int memblock_idx;
	void *memblock;
	xge_hal_mempool_dma_t *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __hal_ring_block_memblock_idx((xge_hal_ring_block_t *) item);

	/* get owner memblock by memblock index */
	memblock = __hal_mempool_memblock((xge_hal_mempool_t *) mempoolh,
			memblock_idx);

	/* get memblock DMA object by memblock index */
	memblock_dma_object =
		__hal_mempool_memblock_dma((xge_hal_mempool_t *) mempoolh,
			memblock_idx);

	/* calculate offset in the memblock of this item */
	dma_item_offset = (char*)item - (char*)memblock;

	*dma_handle = memblock_dma_object->handle;

	return memblock_dma_object->addr + dma_item_offset;
}

static void
__hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
		xge_hal_ring_t *ring, int from, int to)
{
	xge_hal_ring_block_t *to_item, *from_item;
	dma_addr_t to_dma, from_dma __unused;
	pci_dma_h to_dma_handle, from_dma_handle;

	/* get "from" RxD block */
	from_item = (xge_hal_ring_block_t *)
		__hal_mempool_item((xge_hal_mempool_t *) mempoolh, from);
	xge_assert(from_item);

	/* get "to" RxD block */
	to_item = (xge_hal_ring_block_t *)
		__hal_mempool_item((xge_hal_mempool_t *) mempoolh, to);
	xge_assert(to_item);

	/* return address of the beginning of previous RxD block */
	to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);

	/* set the next pointer of this RxD block to point to the
	 * previous item's DMA start address */
	__hal_ring_block_next_pointer_set(from_item, to_dma);

	/* return "from" RxD block's DMA start address */
	from_dma =
		__hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle);

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	/* we must sync "from" RxD block, so hardware will see it */
	xge_os_dma_sync(ring->channel.pdev,
			from_dma_handle,
			from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
			__hal_ring_item_dma_offset(mempoolh, from_item) +
				XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
			sizeof(u64),
			XGE_OS_DMA_DIR_TODEVICE);
#endif

	xge_debug_ring(XGE_TRACE, "block%d:0x"XGE_OS_LLXFMT" => block%d:0x"XGE_OS_LLXFMT,
			from, (unsigned long long)from_dma, to,
			(unsigned long long)to_dma);
}

static xge_hal_status_e
__hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
		void *memblock,
		int memblock_index,
		xge_hal_mempool_dma_t *dma_object,
		void *item,
		int index,
		int is_last,
		void *userdata)
{
	int i;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata;

	xge_assert(item);
	xge_assert(ring);


	/* format rxds array */
	for (i=ring->rxds_per_block-1; i>=0; i--) {
		void *rxdblock_priv;
		xge_hal_ring_rxd_priv_t *rxd_priv;
		xge_hal_ring_rxd_1_t *rxdp;
		int reserve_index = index * ring->rxds_per_block + i;
		int memblock_item_idx;

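		/* RxDs within the block are reserved in reverse order, so
		 * reserved_rxds_arr[index * rxds_per_block] ends up pointing
		 * at the last RxD of the block (see the sanity check in
		 * __hal_ring_open()) */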
		ring->reserved_rxds_arr[reserve_index] = (char *)item +
			(ring->rxds_per_block - 1 - i) * ring->rxd_size;

		/* Note: memblock_item_idx is index of the item within
		 * the memblock. For instance, in case of three RxD-blocks
		 * per memblock this value can be 0, 1 or 2. */
		rxdblock_priv =
			__hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
				memblock_index, item,
				&memblock_item_idx);
		rxdp = (xge_hal_ring_rxd_1_t *)
			ring->reserved_rxds_arr[reserve_index];
		rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *)
			((char*)rxdblock_priv + ring->rxd_priv_size * i);

		/* pre-format per-RxD Ring's private */
		rxd_priv->dma_offset = (char*)rxdp - (char*)memblock;
		rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
		rxd_priv->dma_handle = dma_object->handle;
#ifdef XGE_DEBUG_ASSERT
		rxd_priv->dma_object = dma_object;
#endif

		/* pre-format Host_Control */
#if defined(XGE_HAL_USE_5B_MODE)
		if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
			xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp;
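			/* host_control is only 32 bits wide in 5-buffer mode;
			 * on 64-bit platforms the rxd_priv pointer does not
			 * fit, so the memblock index (upper 16 bits) and the
			 * RxD index within the memblock (lower 16 bits) are
			 * stored instead */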
#if defined(XGE_OS_PLATFORM_64BIT)
			xge_assert(memblock_index <= 0xFFFF);
			xge_assert(i <= 0xFFFF);
			/* store memblock's index */
			rxdp_5->host_control = (u32)memblock_index << 16;
			/* store index of memblock's private */
			rxdp_5->host_control |= (u32)(memblock_item_idx *
					ring->rxds_per_block + i);
#else
			/* 32-bit case */
			rxdp_5->host_control = (u32)rxd_priv;
#endif
		} else {
			/* 1b and 3b modes */
			rxdp->host_control = (u64)(ulong_t)rxd_priv;
		}
#else
		/* 1b and 3b modes */
		rxdp->host_control = (u64)(ulong_t)rxd_priv;
#endif
	}

	__hal_ring_block_memblock_idx_set((xge_hal_ring_block_t *) item, memblock_index);

	if (is_last) {
		/* link last one with first one */
		__hal_ring_rxdblock_link(mempoolh, ring, 0, index);
	}

	if (index > 0) {
		/* link this RxD block with previous one */
		__hal_ring_rxdblock_link(mempoolh, ring, index, index-1);
	}

	return XGE_HAL_OK;
}

xge_hal_status_e
__hal_ring_initial_replenish(xge_hal_channel_t *channel,
		xge_hal_channel_reopen_e reopen)
{
	xge_hal_dtr_h dtr = NULL;

	while (xge_hal_channel_dtr_count(channel) > 0) {
		xge_hal_status_e status;

		status = xge_hal_ring_dtr_reserve(channel, &dtr);
		xge_assert(status == XGE_HAL_OK);

		if (channel->dtr_init) {
			status = channel->dtr_init(channel,
					dtr, channel->reserve_length,
					channel->userdata,
					reopen);
			if (status != XGE_HAL_OK) {
				xge_hal_ring_dtr_free(channel, dtr);
				xge_hal_channel_abort(channel,
						XGE_HAL_CHANNEL_OC_NORMAL);
				return status;
			}
		}

		xge_hal_ring_dtr_post(channel, dtr);
	}

	return XGE_HAL_OK;
}

xge_hal_status_e
__hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
	xge_hal_status_e status;
	xge_hal_device_t *hldev;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;


	/* Note: at this point we have channel.devh and channel.pdev
	 * pre-set only! */

	hldev = (xge_hal_device_t *)ring->channel.devh;
	ring->config = &hldev->config.ring;
	queue = &ring->config->queue[attr->post_qid];
	ring->indicate_max_pkts = queue->indicate_max_pkts;
	ring->buffer_mode = queue->buffer_mode;

	xge_assert(queue->configured);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_init(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif

	ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
	ring->rxd_priv_size =
		sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space;

	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block = XGE_HAL_RING_RXDS_PER_BLOCK(queue->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

	ring->reserved_rxds_arr = (void **) xge_os_malloc(ring->channel.pdev,
			sizeof(void*) * queue->max * ring->rxds_per_block);

	if (ring->reserved_rxds_arr == NULL) {
		__hal_ring_close(channelh);
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

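	/* each mempool item is one RxD block; __hal_ring_mempool_item_alloc()
	 * formats the block and links it into the ring as soon as the
	 * mempool allocates it */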
	ring->mempool = __hal_mempool_create(
			hldev->pdev,
			ring->config->memblock_size,
			XGE_HAL_RING_RXDBLOCK_SIZE,
			ring->rxdblock_priv_size,
			queue->initial, queue->max,
			__hal_ring_mempool_item_alloc,
			NULL, /* nothing to free */
			ring);
	if (ring->mempool == NULL) {
		__hal_ring_close(channelh);
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	status = __hal_channel_initialize(channelh,
			attr,
			ring->reserved_rxds_arr,
			queue->initial * ring->rxds_per_block,
			queue->max * ring->rxds_per_block,
			0 /* no threshold for ring! */);
	if (status != XGE_HAL_OK) {
		__hal_ring_close(channelh);
		return status;
	}

	/* sanity check that everything formatted ok */
	xge_assert(ring->reserved_rxds_arr[0] ==
			(char *)ring->mempool->items_arr[0] +
			(ring->rxds_per_block * ring->rxd_size - ring->rxd_size));

	/* Note:
	 * Specifying dtr_init callback means two things:
	 * 1) dtrs need to be initialized by ULD at channel-open time;
	 * 2) dtrs need to be posted at channel-open time
	 *    (that's what the initial_replenish() below does)
	 * Currently we don't have a case when the 1) is done without the 2).
	 */
	if (ring->channel.dtr_init) {
		if ((status = __hal_ring_initial_replenish(
				(xge_hal_channel_t *) channelh,
				XGE_HAL_CHANNEL_OC_NORMAL))
					!= XGE_HAL_OK) {
			__hal_ring_close(channelh);
			return status;
		}
	}

	/* initial replenish will increment the counter in its post() routine,
	 * we have to reset it */
	ring->channel.usage_cnt = 0;

	return XGE_HAL_OK;
}

void
__hal_ring_close(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;
#if defined(XGE_HAL_RX_MULTI_RESERVE)||defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)||\
    defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
#endif

	xge_assert(ring->channel.pdev);

	queue = &ring->config->queue[ring->channel.post_qid];

	if (ring->mempool) {
		__hal_mempool_destroy(ring->mempool);
	}

	if (ring->reserved_rxds_arr) {
		xge_os_free(ring->channel.pdev,
				ring->reserved_rxds_arr,
				sizeof(void*) * queue->max * ring->rxds_per_block);
	}

	__hal_channel_terminate(channelh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_destroy(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.reserve_lock, hldev->pdev);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif
}

void
__hal_ring_prc_enable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;
	void *first_block;
	int block_num;
	xge_hal_ring_queue_t *queue;
	pci_dma_h dma_handle;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
			((xge_hal_device_t *)ring->channel.devh)->bar0;

	queue = &ring->config->queue[ring->channel.post_qid];
	xge_assert(queue->buffer_mode == 1 ||
			queue->buffer_mode == 3 ||
			queue->buffer_mode == 5);

	/* the last block in fact becomes the first one. This is just the
	 * way it is filled up and linked by item_alloc() */

	block_num = queue->initial;
	first_block = __hal_mempool_item(ring->mempool, block_num - 1);
	val64 = __hal_ring_item_dma_addr(ring->mempool,
			first_block, &dma_handle);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);

	xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x"XGE_OS_LLXFMT" initialized",
			ring->channel.post_qid, (unsigned long long)val64);

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
			ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
	    !queue->rth_en) {
		val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
	}
	val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;

	val64 |= vBIT((queue->buffer_mode >> 1),14,2);/* 1,3 or 5 => 0,1 or 2 */
	val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
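	/* the backoff interval is programmed in PCI bus clocks:
	 * cycles = pci_freq_mherz (MHz) * backoff_interval_us (us) */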
	val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
			(hldev->config.pci_freq_mherz * queue->backoff_interval_us));

	/* Beware: no snoop by the bridge if (no_snoop_bits) */
	val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);

	/* Herc: always use group_reads */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
		val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;

	if (hldev->config.bimodal_interrupts)
		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
			val64 |= XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT;

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);

	/* Configure Receive Protocol Assist */
	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
			ring->channel.regh0, &bar0->rx_pa_cfg);
	val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
	val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI | XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
	/* Clean STRIP_VLAN_TAG bit and set as config from upper layer */
	val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
	val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->rx_pa_cfg);

	xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
			ring->channel.post_qid, queue->buffer_mode);
}

void
__hal_ring_prc_disable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
			((xge_hal_device_t *)ring->channel.devh)->bar0;

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
			ring->channel.regh0,
			&bar0->prc_ctrl_n[ring->channel.post_qid]);
	val64 &= ~((u64) XGE_HAL_PRC_CTRL_RC_ENABLED);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
}

void
__hal_ring_hw_initialize(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	int i, j;

	/* Rx DMA initialization. */

	val64 = 0;
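	/* each configured ring gets a 3-bit priority, one ring per byte
	 * of the rx_queue_priority register */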
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (!hldev->config.ring.queue[i].configured)
			continue;
		val64 |= vBIT(hldev->config.ring.queue[i].priority,
				(5 + (i * 8)), 3);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			&bar0->rx_queue_priority);
	xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x"XGE_OS_LLXFMT,
			(unsigned long long)val64);

	/* Configuring ring queues according to per-ring configuration */
	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (!hldev->config.ring.queue[i].configured)
			continue;
		val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,(i*8),8);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			&bar0->rx_queue_cfg);
	xge_debug_ring(XGE_TRACE, "DRAM configured to 0x"XGE_OS_LLXFMT,
			(unsigned long long)val64);

	if (!hldev->config.rts_qos_en &&
	    !hldev->config.rts_port_en &&
	    !hldev->config.rts_mac_en) {

		/*
		 * Activate default (QoS-based) Rx steering
		 */

		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				&bar0->rts_qos_steering);
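		/* the steering register carries one byte per QoS priority
		 * (8 in total); mark every configured non-RTH ring in each
		 * of those bytes */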
		for (j = 0; j < 8 /* QoS max */; j++)
		{
			for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++)
			{
				if (!hldev->config.ring.queue[i].configured)
					continue;
				if (!hldev->config.ring.queue[i].rth_en)
					val64 |= (BIT(i) >> (j*8));
			}
		}
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
				&bar0->rts_qos_steering);
		xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x"XGE_OS_LLXFMT,
				(unsigned long long)val64);

	}

	/* Note: If a queue does not exist, it should be assigned a maximum
	 * length of zero. Otherwise, packet loss could occur.
	 * P. 4-4 User guide.
	 *
	 * All configured rings will be properly set at device open time
	 * by utilizing device_mtu_set() API call. */
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (hldev->config.ring.queue[i].configured)
			continue;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
				&bar0->rts_frm_len_n[i]);
	}

#ifdef XGE_HAL_HERC_EMULATION
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
			((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
	val64 |= 0x0000000000010000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			((u8 *)bar0 + 0x2e60));

	val64 |= 0x003a000000000000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
	xge_os_mdelay(2000);
#endif

	/* now enabling MC-RLDRAM after setting MC_QUEUE sizes */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
			&bar0->mc_rldram_mrs);
	val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
		XGE_HAL_MC_RLDRAM_MRS_ENABLE;
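	/* the 64-bit MRS value is written as two 32-bit halves: the upper
	 * word first, then the lower word after a write memory barrier */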
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
			&bar0->mc_rldram_mrs);
	xge_os_wmb();
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
			&bar0->mc_rldram_mrs);

	/* RLDRAM initialization procedure requires 500us to complete */
	xge_os_mdelay(1);

	/* Temporary fixes for Herc RLDRAM */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
		val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
				&bar0->mc_rldram_ref_per_herc);

		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				&bar0->mc_rldram_mrs_herc);
		xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT,
				(unsigned long long)val64);

		val64 = 0x0003570003010300ULL;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
				&bar0->mc_rldram_mrs_herc);

		xge_os_mdelay(1);
	}

	if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
		return;

	/*
	 * Assign MSI-X vectors
	 */
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		xge_list_t *item;
		xge_hal_channel_t *channel = NULL;

		if (!hldev->config.ring.queue[i].configured ||
		    !hldev->config.ring.queue[i].intr_vector)
			continue;

		/* find channel */
		xge_list_for_each(item, &hldev->free_channels) {
			xge_hal_channel_t *tmp;
			tmp = xge_container_of(item, xge_hal_channel_t,
					item);
			if (tmp->type == XGE_HAL_CHANNEL_TYPE_RING &&
			    tmp->post_qid == i) {
				channel = tmp;
				break;
			}
		}

		if (channel) {
			(void) xge_hal_channel_msix_set(channel,
					hldev->config.ring.queue[i].intr_vector);
		}
	}

	xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
}

void
__hal_ring_mtu_set(xge_hal_device_h devh, int new_frmlen)
{
	int i;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

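	/* queues configured with XGE_HAL_RING_USE_MTU follow the device
	 * MTU (new_frmlen); all others keep their fixed max_frm_len */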
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (!hldev->config.ring.queue[i].configured)
			continue;
		if (hldev->config.ring.queue[i].max_frm_len !=
				XGE_HAL_RING_USE_MTU) {
			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
					XGE_HAL_MAC_RTS_FRM_LEN_SET(
					hldev->config.ring.queue[i].max_frm_len),
					&bar0->rts_frm_len_n[i]);
		} else {
			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
					XGE_HAL_MAC_RTS_FRM_LEN_SET(new_frmlen),
					&bar0->rts_frm_len_n[i]);
		}
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			XGE_HAL_RMAC_MAX_PYLD_LEN(new_frmlen),
			&bar0->rmac_max_pyld_len);
}
659