/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */

#include "xge-queue.h"
25
26 /**
27 * xge_queue_item_data - Get item's data.
28 * @item: Queue item.
29 *
30 * Returns: item data(variable size). Note that xge_queue_t
31 * contains items comprized of a fixed xge_queue_item_t "header"
32 * and a variable size data. This function returns the variable
33 * user-defined portion of the queue item.
34 */
xge_queue_item_data(xge_queue_item_t * item)35 void* xge_queue_item_data(xge_queue_item_t *item)
36 {
37 return (char *)item + sizeof(xge_queue_item_t);
38 }
39
/*
 * __queue_consume - (Lockless) dequeue an item from the specified queue.
 *
 * @queue: Event queue.
 * @data_max_size: Maximum payload size (bytes) the caller's @item buffer
 *                 can accept.
 * @item: Caller-provided buffer the dequeued item (header + payload) is
 *        _copied_ into.
 *
 * "Lockless" means this helper takes no lock itself; both callers in this
 * file (xge_queue_consume() and xge_queue_produce()) invoke it with
 * queue->lock already held.
 * See xge_queue_consume().
 */
static xge_queue_status_e
__queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
{
    int real_size;
    xge_queue_item_t *elem;

    if (xge_list_is_empty(&queue->list_head))
        return XGE_QUEUE_IS_EMPTY;

    /* FIFO: the oldest element is at the front of the linked list */
    elem = (xge_queue_item_t *)queue->list_head.next;
    if (elem->data_size > data_max_size)
        return XGE_QUEUE_NOT_ENOUGH_SPACE;

    xge_list_remove(&elem->item);
    /* total buffer footprint: fixed header plus variable payload */
    real_size = elem->data_size + sizeof(xge_queue_item_t);
    if (queue->head_ptr == elem) {
        /* element sat at the front of the used region: advance head */
        queue->head_ptr = (char *)queue->head_ptr + real_size;
        xge_debug_queue(XGE_TRACE,
                "event_type: %d removing from the head: "
                "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
                ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
                elem->event_type,
                (u64)(ulong_t)queue->start_ptr,
                (u64)(ulong_t)queue->head_ptr,
                (u64)(ulong_t)queue->tail_ptr,
                (u64)(ulong_t)queue->end_ptr,
                (u64)(ulong_t)elem,
                real_size);
    } else if ((char *)queue->tail_ptr - real_size == (char*)elem) {
        /* element sat at the very end of the used region: pull tail back */
        queue->tail_ptr = (char *)queue->tail_ptr - real_size;
        xge_debug_queue(XGE_TRACE,
                "event_type: %d removing from the tail: "
                "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
                ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
                elem->event_type,
                (u64)(ulong_t)queue->start_ptr,
                (u64)(ulong_t)queue->head_ptr,
                (u64)(ulong_t)queue->tail_ptr,
                (u64)(ulong_t)queue->end_ptr,
                (u64)(ulong_t)elem,
                real_size);
    } else {
        /*
         * Element is in the middle: only unlink it from the list; its
         * buffer space is reclaimed lazily (head/tail reset below once
         * the queue drains completely).
         */
        xge_debug_queue(XGE_TRACE,
                "event_type: %d removing from the list: "
                "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
                ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
                elem->event_type,
                (u64)(ulong_t)queue->start_ptr,
                (u64)(ulong_t)queue->head_ptr,
                (u64)(ulong_t)queue->tail_ptr,
                (u64)(ulong_t)queue->end_ptr,
                (u64)(ulong_t)elem,
                real_size);
    }
    /* invariant: used region [head_ptr, tail_ptr] lies inside
     * the buffer [start_ptr, end_ptr] */
    xge_assert(queue->tail_ptr >= queue->head_ptr);
    xge_assert(queue->tail_ptr >= queue->start_ptr &&
            queue->tail_ptr <= queue->end_ptr);
    xge_assert(queue->head_ptr >= queue->start_ptr &&
            queue->head_ptr < queue->end_ptr);
    /* copy the fixed header, then the variable payload, to the caller */
    xge_os_memcpy(item, elem, sizeof(xge_queue_item_t));
    xge_os_memcpy(xge_queue_item_data(item), xge_queue_item_data(elem),
            elem->data_size);

    if (xge_list_is_empty(&queue->list_head)) {
        /* reset buffer pointers just to be clean */
        queue->head_ptr = queue->tail_ptr = queue->start_ptr;
    }
    return XGE_QUEUE_OK;
}
115
/**
 * xge_queue_produce - Enqueue an item (see xge_queue_item_t{})
 * into the specified queue.
 * @queueh: Queue handle.
 * @event_type: Event type. One of the enumerated event types
 * that both consumer and producer "understand".
 * For an example, please refer to xge_hal_event_e.
 * @context: Opaque (void*) "context", for instance event producer object.
 * @is_critical: For critical event, e.g. ECC.
 * @data_size: Size of the @data.
 * @data: User data of variable @data_size that is _copied_ into
 * the new queue item (see xge_queue_item_t{}). Upon return
 * from the call the @data memory can be re-used or released.
 *
 * Enqueue a new item. Holds queue->lock for the whole operation;
 * the optional queued_func callback is invoked after the lock is
 * dropped.
 *
 * Returns: XGE_QUEUE_OK - success.
 * XGE_QUEUE_IS_FULL - Queue is full.
 * XGE_QUEUE_OUT_OF_MEMORY - Memory allocation failed.
 *
 * See also: xge_queue_item_t{}, xge_queue_consume().
 */
xge_queue_status_e
xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
        int is_critical, const int data_size, void *data)
{
    xge_queue_t *queue = (xge_queue_t *)queueh;
    int real_size = data_size + sizeof(xge_queue_item_t);
    xge_queue_item_t *elem;
    unsigned long flags = 0;

    /* a single item (header + payload) must fit in one queue page */
    xge_assert(real_size <= XGE_QUEUE_BUF_SIZE);

    xge_os_spin_lock_irq(&queue->lock, flags);

    /*
     * First critical event: drain and DISCARD everything currently
     * queued so the critical event is guaranteed space and will be
     * consumed first. If a critical event is already pending, the new
     * one is enqueued normally behind it.
     */
    if (is_critical && !queue->has_critical_event) {
        unsigned char item_buf[sizeof(xge_queue_item_t) +
                XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
        xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
        xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
                XGE_DEFAULT_EVENT_MAX_DATA_SIZE));

        while (__queue_consume(queue,
                XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
                item) != XGE_QUEUE_IS_EMPTY)
            ; /* do nothing */
    }

try_again:
    if ((char *)queue->tail_ptr + real_size <= (char *)queue->end_ptr) {
        /* room after the current tail: append there */
        elem = (xge_queue_item_t *) queue->tail_ptr;
        queue->tail_ptr = (void *)((char *)queue->tail_ptr + real_size);
        xge_debug_queue(XGE_TRACE,
                "event_type: %d adding to the tail: "
                "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
                ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
                event_type,
                (u64)(ulong_t)queue->start_ptr,
                (u64)(ulong_t)queue->head_ptr,
                (u64)(ulong_t)queue->tail_ptr,
                (u64)(ulong_t)queue->end_ptr,
                (u64)(ulong_t)elem,
                real_size);
    } else if ((char *)queue->head_ptr - real_size >=
            (char *)queue->start_ptr) {
        /* no room at the tail, but room before the head: prepend */
        elem = (xge_queue_item_t *) ((char *)queue->head_ptr - real_size);
        queue->head_ptr = elem;
        /* NOTE(review): unlike the tail path, this trace omits elem */
        xge_debug_queue(XGE_TRACE,
                "event_type: %d adding to the head: "
                "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
                ":0x"XGE_OS_LLXFMT" length %d",
                event_type,
                (u64)(ulong_t)queue->start_ptr,
                (u64)(ulong_t)queue->head_ptr,
                (u64)(ulong_t)queue->tail_ptr,
                (u64)(ulong_t)queue->end_ptr,
                real_size);
    } else {
        xge_queue_status_e status;

        /* buffer exhausted and already at the page-count ceiling */
        if (queue->pages_current >= queue->pages_max) {
            xge_os_spin_unlock_irq(&queue->lock, flags);
            return XGE_QUEUE_IS_FULL;
        }

        /* don't grow while a critical event is pending consumption */
        if (queue->has_critical_event) {
            xge_os_spin_unlock_irq(&queue->lock, flags);
            return XGE_QUEUE_IS_FULL;
        }

        /* grow by one page, then retry the placement logic above */
        status = __io_queue_grow(queueh);
        if (status != XGE_QUEUE_OK) {
            xge_os_spin_unlock_irq(&queue->lock, flags);
            return status;
        }

        goto try_again;
    }
    xge_assert(queue->tail_ptr >= queue->head_ptr);
    xge_assert(queue->tail_ptr >= queue->start_ptr &&
            queue->tail_ptr <= queue->end_ptr);
    xge_assert(queue->head_ptr >= queue->start_ptr &&
            queue->head_ptr < queue->end_ptr);
    /* fill in the in-buffer element and link it at the list tail */
    elem->data_size = data_size;
    elem->event_type = (xge_hal_event_e) event_type;
    elem->is_critical = is_critical;
    if (is_critical)
        queue->has_critical_event = 1;
    elem->context = context;
    xge_os_memcpy(xge_queue_item_data(elem), data, data_size);
    xge_list_insert_before(&elem->item, &queue->list_head);
    xge_os_spin_unlock_irq(&queue->lock, flags);

    /* no lock taken! */
    queue->queued_func(queue->queued_data, event_type);

    return XGE_QUEUE_OK;
}
235
236
237 /**
238 * xge_queue_create - Create protected first-in-first-out queue.
239 * @pdev: PCI device handle.
240 * @irqh: PCI device IRQ handle.
241 * @pages_initial: Number of pages to be initially allocated at the
242 * time of queue creation.
243 * @pages_max: Max number of pages that can be allocated in the queue.
244 * @queued: Optional callback function to be called each time a new item is
245 * added to the queue.
246 * @queued_data: Argument to the callback function.
247 *
248 * Create protected (fifo) queue.
249 *
250 * Returns: Pointer to xge_queue_t structure,
251 * NULL - on failure.
252 *
253 * See also: xge_queue_item_t{}, xge_queue_destroy().
254 */
255 xge_queue_h
xge_queue_create(pci_dev_h pdev,pci_irq_h irqh,int pages_initial,int pages_max,xge_queued_f queued,void * queued_data)256 xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
257 int pages_max, xge_queued_f queued, void *queued_data)
258 {
259 xge_queue_t *queue;
260
261 if ((queue = (xge_queue_t *) xge_os_malloc(pdev, sizeof(xge_queue_t))) == NULL)
262 return NULL;
263
264 queue->queued_func = queued;
265 queue->queued_data = queued_data;
266 queue->pdev = pdev;
267 queue->irqh = irqh;
268 queue->pages_current = pages_initial;
269 queue->start_ptr = xge_os_malloc(pdev, queue->pages_current *
270 XGE_QUEUE_BUF_SIZE);
271 if (queue->start_ptr == NULL) {
272 xge_os_free(pdev, queue, sizeof(xge_queue_t));
273 return NULL;
274 }
275 queue->head_ptr = queue->tail_ptr = queue->start_ptr;
276 queue->end_ptr = (char *)queue->start_ptr +
277 queue->pages_current * XGE_QUEUE_BUF_SIZE;
278 xge_os_spin_lock_init_irq(&queue->lock, irqh);
279 queue->pages_initial = pages_initial;
280 queue->pages_max = pages_max;
281 xge_list_init(&queue->list_head);
282
283 return queue;
284 }
285
286 /**
287 * xge_queue_destroy - Destroy xge_queue_t object.
288 * @queueh: Queue handle.
289 *
290 * Destroy the specified xge_queue_t object.
291 *
292 * See also: xge_queue_item_t{}, xge_queue_create().
293 */
xge_queue_destroy(xge_queue_h queueh)294 void xge_queue_destroy(xge_queue_h queueh)
295 {
296 xge_queue_t *queue = (xge_queue_t *)queueh;
297 xge_os_spin_lock_destroy_irq(&queue->lock, queue->irqh);
298 if (!xge_list_is_empty(&queue->list_head)) {
299 xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x"
300 XGE_OS_LLXFMT, (u64)(ulong_t)queue);
301 }
302 xge_os_free(queue->pdev, queue->start_ptr, queue->pages_current *
303 XGE_QUEUE_BUF_SIZE);
304
305 xge_os_free(queue->pdev, queue, sizeof(xge_queue_t));
306 }
307
/*
 * __io_queue_grow - Dynamically increases the size of the queue.
 * @queueh: Queue handle.
 *
 * This function is called in the case of no slot available in the queue
 * to accommodate the newly received event.
 * Note that queue cannot grow beyond the max size specified for the
 * queue (the pages_max limit is enforced by the caller,
 * xge_queue_produce(), which also holds queue->lock across this call).
 *
 * Grows by exactly one page: allocates a larger buffer, copies the old
 * contents, then rebases every stored pointer (head/tail and all list
 * links) by the newbuf-oldbuf displacement.
 *
 * Returns XGE_QUEUE_OK: On success.
 * XGE_QUEUE_OUT_OF_MEMORY : No memory is available.
 */
xge_queue_status_e
__io_queue_grow(xge_queue_h queueh)
{
    xge_queue_t *queue = (xge_queue_t *)queueh;
    void *newbuf, *oldbuf;
    xge_list_t *item;
    xge_queue_item_t *elem;

    xge_debug_queue(XGE_TRACE, "queue 0x"XGE_OS_LLXFMT":%d is growing",
            (u64)(ulong_t)queue, queue->pages_current);

    newbuf = xge_os_malloc(queue->pdev,
            (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE);
    if (newbuf == NULL)
        return XGE_QUEUE_OUT_OF_MEMORY;

    xge_os_memcpy(newbuf, queue->start_ptr,
            queue->pages_current * XGE_QUEUE_BUF_SIZE);
    oldbuf = queue->start_ptr;

    /* adjust queue sizes */
    queue->start_ptr = newbuf;
    queue->end_ptr = (char *)newbuf +
        (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE;
    /* rebase head/tail: preserve their offsets within the buffer */
    queue->tail_ptr = (char *)newbuf + ((char *)queue->tail_ptr -
            (char *)oldbuf);
    queue->head_ptr = (char *)newbuf + ((char *)queue->head_ptr -
            (char *)oldbuf);
    /*
     * The queue only grows when it is full, so the list cannot be
     * empty here; the link fix-ups below rely on next/prev pointing
     * into oldbuf.
     */
    xge_assert(!xge_list_is_empty(&queue->list_head));
    queue->list_head.next = (xge_list_t *) (void *)((char *)newbuf +
            ((char *)queue->list_head.next - (char *)oldbuf));
    queue->list_head.prev = (xge_list_t *) (void *)((char *)newbuf +
            ((char *)queue->list_head.prev - (char *)oldbuf));
    /* adjust queue list: rebase every element's links, except the two
     * that point back at list_head itself (which did not move) */
    xge_list_for_each(item, &queue->list_head) {
        elem = xge_container_of(item, xge_queue_item_t, item);
        if (elem->item.next != &queue->list_head) {
            elem->item.next =
                (xge_list_t*)(void *)((char *)newbuf +
                    ((char *)elem->item.next - (char *)oldbuf));
        }
        if (elem->item.prev != &queue->list_head) {
            elem->item.prev =
                (xge_list_t*) (void *)((char *)newbuf +
                    ((char *)elem->item.prev - (char *)oldbuf));
        }
    }
    xge_os_free(queue->pdev, oldbuf,
            queue->pages_current * XGE_QUEUE_BUF_SIZE);
    queue->pages_current++;

    return XGE_QUEUE_OK;
}
373
374 /**
375 * xge_queue_consume - Dequeue an item from the specified queue.
376 * @queueh: Queue handle.
377 * @data_max_size: Maximum expected size of the item.
378 * @item: Memory area into which the item is _copied_ upon return
379 * from the function.
380 *
381 * Dequeue an item from the queue. The caller is required to provide
382 * enough space for the item.
383 *
384 * Returns: XGE_QUEUE_OK - success.
385 * XGE_QUEUE_IS_EMPTY - Queue is empty.
386 * XGE_QUEUE_NOT_ENOUGH_SPACE - Requested item size(@data_max_size)
387 * is too small to accomodate an item from the queue.
388 *
389 * See also: xge_queue_item_t{}, xge_queue_produce().
390 */
391 xge_queue_status_e
xge_queue_consume(xge_queue_h queueh,int data_max_size,xge_queue_item_t * item)392 xge_queue_consume(xge_queue_h queueh, int data_max_size, xge_queue_item_t *item)
393 {
394 xge_queue_t *queue = (xge_queue_t *)queueh;
395 unsigned long flags = 0;
396 xge_queue_status_e status;
397
398 xge_os_spin_lock_irq(&queue->lock, flags);
399 status = __queue_consume(queue, data_max_size, item);
400 xge_os_spin_unlock_irq(&queue->lock, flags);
401
402 return status;
403 }
404
405
406 /**
407 * xge_queue_flush - Flush, or empty, the queue.
408 * @queueh: Queue handle.
409 *
410 * Flush the queue, i.e. make it empty by consuming all events
411 * without invoking the event processing logic (callbacks, etc.)
412 */
xge_queue_flush(xge_queue_h queueh)413 void xge_queue_flush(xge_queue_h queueh)
414 {
415 unsigned char item_buf[sizeof(xge_queue_item_t) +
416 XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
417 xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
418 xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
419 XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
420
421 /* flush queue by consuming all enqueued items */
422 while (xge_queue_consume(queueh,
423 XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
424 item) != XGE_QUEUE_IS_EMPTY) {
425 /* do nothing */
426 xge_debug_queue(XGE_TRACE, "item "XGE_OS_LLXFMT"(%d) flushed",
427 item, item->event_type);
428 }
429 (void) __queue_get_reset_critical (queueh);
430 }
431
432 /*
433 * __queue_get_reset_critical - Check for critical events in the queue,
434 * @qh: Queue handle.
435 *
436 * Check for critical event(s) in the queue, and reset the
437 * "has-critical-event" flag upon return.
438 * Returns: 1 - if the queue contains atleast one critical event.
439 * 0 - If there are no critical events in the queue.
440 */
__queue_get_reset_critical(xge_queue_h qh)441 int __queue_get_reset_critical (xge_queue_h qh) {
442 xge_queue_t* queue = (xge_queue_t*)qh;
443 int c = queue->has_critical_event;
444
445 queue->has_critical_event = 0;
446 return c;
447 }
448