/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */

#include "xge-queue.h"

/**
 * xge_queue_item_data - Get item's data.
 * @item: Queue item.
 *
 * Returns: item data (variable size). Note that xge_queue_t
 * contains items comprised of a fixed xge_queue_item_t "header"
 * and variable-size data. This function returns the variable,
 * user-defined portion of the queue item.
 */
void* xge_queue_item_data(xge_queue_item_t *item)
{
	return (char *)item + sizeof(xge_queue_item_t);
}
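
/*
 * Layout sketch (illustrative annotation, not part of the original driver):
 * each queue entry is a fixed xge_queue_item_t header immediately followed
 * by the caller-supplied payload, so for a queue item "it":
 *
 *	void *payload = xge_queue_item_data(it);
 *
 * payload points at (char *)it + sizeof(xge_queue_item_t) and holds
 * it->data_size bytes of user data.
 */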

/*
 * __queue_consume - (Lockless) dequeue an item from the specified queue.
 *
 * @queue: Event queue.
 * See xge_queue_consume().
 */
static xge_queue_status_e
__queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
{
	int real_size;
	xge_queue_item_t *elem;

	if (xge_list_is_empty(&queue->list_head))
		return XGE_QUEUE_IS_EMPTY;

	elem = (xge_queue_item_t *)queue->list_head.next;
	if (elem->data_size > data_max_size)
		return XGE_QUEUE_NOT_ENOUGH_SPACE;

	xge_list_remove(&elem->item);
	real_size = elem->data_size + sizeof(xge_queue_item_t);
	if (queue->head_ptr == elem) {
		queue->head_ptr = (char *)queue->head_ptr + real_size;
		xge_debug_queue(XGE_TRACE,
			"event_type: %d removing from the head: "
			"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
			":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
			elem->event_type,
			(u64)(ulong_t)queue->start_ptr,
			(u64)(ulong_t)queue->head_ptr,
			(u64)(ulong_t)queue->tail_ptr,
			(u64)(ulong_t)queue->end_ptr,
			(u64)(ulong_t)elem,
			real_size);
	} else if ((char *)queue->tail_ptr - real_size == (char *)elem) {
		queue->tail_ptr = (char *)queue->tail_ptr - real_size;
		xge_debug_queue(XGE_TRACE,
			"event_type: %d removing from the tail: "
			"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
			":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
			elem->event_type,
			(u64)(ulong_t)queue->start_ptr,
			(u64)(ulong_t)queue->head_ptr,
			(u64)(ulong_t)queue->tail_ptr,
			(u64)(ulong_t)queue->end_ptr,
			(u64)(ulong_t)elem,
			real_size);
	} else {
		xge_debug_queue(XGE_TRACE,
			"event_type: %d removing from the list: "
			"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
			":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
			elem->event_type,
			(u64)(ulong_t)queue->start_ptr,
			(u64)(ulong_t)queue->head_ptr,
			(u64)(ulong_t)queue->tail_ptr,
			(u64)(ulong_t)queue->end_ptr,
			(u64)(ulong_t)elem,
			real_size);
	}
	xge_assert(queue->tail_ptr >= queue->head_ptr);
	xge_assert(queue->tail_ptr >= queue->start_ptr &&
		    queue->tail_ptr <= queue->end_ptr);
	xge_assert(queue->head_ptr >= queue->start_ptr &&
		    queue->head_ptr < queue->end_ptr);
	xge_os_memcpy(item, elem, sizeof(xge_queue_item_t));
	xge_os_memcpy(xge_queue_item_data(item), xge_queue_item_data(elem),
		    elem->data_size);

	if (xge_list_is_empty(&queue->list_head)) {
		/* reset buffer pointers just to be clean */
		queue->head_ptr = queue->tail_ptr = queue->start_ptr;
	}
	return XGE_QUEUE_OK;
}
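
/*
 * Note on reclamation (added annotation): buffer space is reclaimed only when
 * the removed element sits exactly at the current head, or immediately before
 * the current tail, of the contiguous region. An element removed from the
 * middle is unlinked from the list but its space stays accounted for until
 * the queue drains and head_ptr/tail_ptr are reset to start_ptr above.
 */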

/**
 * xge_queue_produce - Enqueue an item (see xge_queue_item_t{})
 *                      into the specified queue.
 * @queueh: Queue handle.
 * @event_type: Event type. One of the enumerated event types
 *              that both consumer and producer "understand".
 *              For an example, please refer to xge_hal_event_e.
 * @context: Opaque (void*) "context", for instance the event producer object.
 * @is_critical: Non-zero for a critical event, e.g. an ECC error.
 * @data_size: Size of the @data.
 * @data: User data of variable @data_size that is _copied_ into
 *        the new queue item (see xge_queue_item_t{}). Upon return
 *        from the call the @data memory can be re-used or released.
 *
 * Enqueue a new item.
 *
 * Returns: XGE_QUEUE_OK - success.
 * XGE_QUEUE_IS_FULL - Queue is full.
 * XGE_QUEUE_OUT_OF_MEMORY - Memory allocation failed.
 *
 * See also: xge_queue_item_t{}, xge_queue_consume().
 */
xge_queue_status_e
xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
		int is_critical, const int data_size, void *data)
{
	xge_queue_t *queue = (xge_queue_t *)queueh;
	int real_size = data_size + sizeof(xge_queue_item_t);
	xge_queue_item_t *elem;
	unsigned long flags = 0;

	xge_assert(real_size <= XGE_QUEUE_BUF_SIZE);

	xge_os_spin_lock_irq(&queue->lock, flags);

	if (is_critical && !queue->has_critical_event) {
		unsigned char item_buf[sizeof(xge_queue_item_t) +
				XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
		xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;

		/* drain all queued events to guarantee room for the
		 * critical one */
		while (__queue_consume(queue,
				       XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
				       item) != XGE_QUEUE_IS_EMPTY)
			; /* do nothing */
	}

try_again:
	if ((char *)queue->tail_ptr + real_size <= (char *)queue->end_ptr) {
		elem = (xge_queue_item_t *)queue->tail_ptr;
		queue->tail_ptr = (void *)((char *)queue->tail_ptr + real_size);
		xge_debug_queue(XGE_TRACE,
			"event_type: %d adding to the tail: "
			"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
			":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
			event_type,
			(u64)(ulong_t)queue->start_ptr,
			(u64)(ulong_t)queue->head_ptr,
			(u64)(ulong_t)queue->tail_ptr,
			(u64)(ulong_t)queue->end_ptr,
			(u64)(ulong_t)elem,
			real_size);
	} else if ((char *)queue->head_ptr - real_size >=
					(char *)queue->start_ptr) {
		elem = (xge_queue_item_t *)((char *)queue->head_ptr - real_size);
		queue->head_ptr = elem;
		xge_debug_queue(XGE_TRACE,
			"event_type: %d adding to the head: "
			"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
			":0x"XGE_OS_LLXFMT" length %d",
			event_type,
			(u64)(ulong_t)queue->start_ptr,
			(u64)(ulong_t)queue->head_ptr,
			(u64)(ulong_t)queue->tail_ptr,
			(u64)(ulong_t)queue->end_ptr,
			real_size);
	} else {
		xge_queue_status_e status;

		if (queue->pages_current >= queue->pages_max) {
			xge_os_spin_unlock_irq(&queue->lock, flags);
			return XGE_QUEUE_IS_FULL;
		}

		if (queue->has_critical_event) {
			xge_os_spin_unlock_irq(&queue->lock, flags);
			return XGE_QUEUE_IS_FULL;
		}

		/* grow */
		status = __io_queue_grow(queueh);
		if (status != XGE_QUEUE_OK) {
			xge_os_spin_unlock_irq(&queue->lock, flags);
			return status;
		}

		goto try_again;
	}
	xge_assert(queue->tail_ptr >= queue->head_ptr);
	xge_assert(queue->tail_ptr >= queue->start_ptr &&
		    queue->tail_ptr <= queue->end_ptr);
	xge_assert(queue->head_ptr >= queue->start_ptr &&
		    queue->head_ptr < queue->end_ptr);
	elem->data_size = data_size;
	elem->event_type = (xge_hal_event_e) event_type;
	elem->is_critical = is_critical;
	if (is_critical)
		queue->has_critical_event = 1;
	elem->context = context;
	xge_os_memcpy(xge_queue_item_data(elem), data, data_size);
	xge_list_insert_before(&elem->item, &queue->list_head);
	xge_os_spin_unlock_irq(&queue->lock, flags);

	/* no lock taken! notify consumer, if a callback was registered */
	if (queue->queued_func != NULL)
		queue->queued_func(queue->queued_data, event_type);

	return XGE_QUEUE_OK;
}
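
/*
 * Producer sketch (illustrative; MY_EVENT_TYPE, my_context and the payload
 * struct below are hypothetical placeholders, not defined in this file):
 *
 *	struct my_event_data d = { ... };
 *	xge_queue_status_e rc;
 *
 *	rc = xge_queue_produce(queueh, MY_EVENT_TYPE, my_context,
 *			0, sizeof(d), &d);
 *	if (rc != XGE_QUEUE_OK)
 *		... queue is full (pages_max reached) or out of memory ...
 *
 * The payload is _copied_ into the queue, so "d" may live on the stack.
 */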


/**
 * xge_queue_create - Create protected first-in-first-out queue.
 * @pdev: PCI device handle.
 * @irqh: PCI device IRQ handle.
 * @pages_initial: Number of pages to be initially allocated at the
 * time of queue creation.
 * @pages_max: Max number of pages that can be allocated in the queue.
 * @queued: Optional callback function to be called each time a new item is
 * added to the queue.
 * @queued_data: Argument to the callback function.
 *
 * Create a protected (FIFO) queue.
 *
 * Returns: Pointer to xge_queue_t structure,
 * NULL - on failure.
 *
 * See also: xge_queue_item_t{}, xge_queue_destroy().
 */
xge_queue_h
xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
		int pages_max, xge_queued_f queued, void *queued_data)
{
	xge_queue_t *queue;

	if ((queue = (xge_queue_t *)xge_os_malloc(pdev,
	    sizeof(xge_queue_t))) == NULL)
		return NULL;

	queue->queued_func = queued;
	queue->queued_data = queued_data;
	queue->pdev = pdev;
	queue->irqh = irqh;
	queue->pages_current = pages_initial;
	queue->start_ptr = xge_os_malloc(pdev, queue->pages_current *
	                               XGE_QUEUE_BUF_SIZE);
	if (queue->start_ptr == NULL) {
		xge_os_free(pdev, queue, sizeof(xge_queue_t));
		return NULL;
	}
	queue->head_ptr = queue->tail_ptr = queue->start_ptr;
	queue->end_ptr = (char *)queue->start_ptr +
		queue->pages_current * XGE_QUEUE_BUF_SIZE;
	xge_os_spin_lock_init_irq(&queue->lock, irqh);
	queue->pages_initial = pages_initial;
	queue->pages_max = pages_max;
	xge_list_init(&queue->list_head);

	return queue;
}
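
/*
 * Creation sketch (illustrative; "my_queued_cb", "my_data", the page counts
 * and the pdev/irqh handles are placeholders, not defined in this file):
 *
 *	xge_queue_h qh;
 *
 *	qh = xge_queue_create(pdev, irqh, 1, 4, my_queued_cb, my_data);
 *	if (qh == NULL)
 *		... allocation failed ...
 *	...
 *	xge_queue_destroy(qh);
 *
 * The queue starts with pages_initial pages of XGE_QUEUE_BUF_SIZE bytes and
 * may grow up to pages_max pages (see __io_queue_grow()).
 */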

/**
 * xge_queue_destroy - Destroy xge_queue_t object.
 * @queueh: Queue handle.
 *
 * Destroy the specified xge_queue_t object.
 *
 * See also: xge_queue_item_t{}, xge_queue_create().
 */
void xge_queue_destroy(xge_queue_h queueh)
{
	xge_queue_t *queue = (xge_queue_t *)queueh;
	if (!xge_list_is_empty(&queue->list_head)) {
		xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x"
				XGE_OS_LLXFMT, (u64)(ulong_t)queue);
	}
	xge_os_free(queue->pdev, queue->start_ptr, queue->pages_current *
	          XGE_QUEUE_BUF_SIZE);

	xge_os_free(queue->pdev, queue, sizeof(xge_queue_t));
}

/*
 * __io_queue_grow - Dynamically increase the size of the queue.
 * @queueh: Queue handle.
 *
 * This function is called when there is no slot available in the queue
 * to accommodate a newly received event.
 * Note that the queue cannot grow beyond the max size specified for the
 * queue.
 *
 * Returns XGE_QUEUE_OK: On success.
 * XGE_QUEUE_OUT_OF_MEMORY: No memory is available.
 */
xge_queue_status_e
__io_queue_grow(xge_queue_h queueh)
{
	xge_queue_t *queue = (xge_queue_t *)queueh;
	void *newbuf, *oldbuf;
	xge_list_t *item;
	xge_queue_item_t *elem;

	xge_debug_queue(XGE_TRACE, "queue 0x"XGE_OS_LLXFMT":%d is growing",
			 (u64)(ulong_t)queue, queue->pages_current);

	newbuf = xge_os_malloc(queue->pdev,
	        (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE);
	if (newbuf == NULL)
		return XGE_QUEUE_OUT_OF_MEMORY;

	xge_os_memcpy(newbuf, queue->start_ptr,
	       queue->pages_current * XGE_QUEUE_BUF_SIZE);
	oldbuf = queue->start_ptr;

	/* adjust queue sizes */
	queue->start_ptr = newbuf;
	queue->end_ptr = (char *)newbuf +
			(queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE;
	queue->tail_ptr = (char *)newbuf + ((char *)queue->tail_ptr -
					    (char *)oldbuf);
	queue->head_ptr = (char *)newbuf + ((char *)queue->head_ptr -
					    (char *)oldbuf);
	xge_assert(!xge_list_is_empty(&queue->list_head));
	queue->list_head.next = (xge_list_t *)(void *)((char *)newbuf +
			((char *)queue->list_head.next - (char *)oldbuf));
	queue->list_head.prev = (xge_list_t *)(void *)((char *)newbuf +
			((char *)queue->list_head.prev - (char *)oldbuf));
	/* adjust queue list */
	xge_list_for_each(item, &queue->list_head) {
		elem = xge_container_of(item, xge_queue_item_t, item);
		if (elem->item.next != &queue->list_head) {
			elem->item.next =
				(xge_list_t *)(void *)((char *)newbuf +
				 ((char *)elem->item.next - (char *)oldbuf));
		}
		if (elem->item.prev != &queue->list_head) {
			elem->item.prev =
				(xge_list_t *)(void *)((char *)newbuf +
				 ((char *)elem->item.prev - (char *)oldbuf));
		}
	}
	xge_os_free(queue->pdev, oldbuf,
		  queue->pages_current * XGE_QUEUE_BUF_SIZE);
	queue->pages_current++;

	return XGE_QUEUE_OK;
}
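
/*
 * Relocation sketch (illustrative): __io_queue_grow() moves the whole buffer,
 * so every pointer that referenced the old buffer is rebased by the same
 * offset. For any old pointer "p" that pointed inside "oldbuf":
 *
 *	new_p = (char *)newbuf + ((char *)p - (char *)oldbuf);
 *
 * This rebasing is applied to head_ptr, tail_ptr, the list head, and every
 * embedded xge_list_t link of the queued items.
 */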

/**
 * xge_queue_consume - Dequeue an item from the specified queue.
 * @queueh: Queue handle.
 * @data_max_size: Maximum expected size of the item.
 * @item: Memory area into which the item is _copied_ upon return
 *        from the function.
 *
 * Dequeue an item from the queue. The caller is required to provide
 * enough space for the item.
 *
 * Returns: XGE_QUEUE_OK - success.
 * XGE_QUEUE_IS_EMPTY - Queue is empty.
 * XGE_QUEUE_NOT_ENOUGH_SPACE - Requested item size (@data_max_size)
 * is too small to accommodate an item from the queue.
 *
 * See also: xge_queue_item_t{}, xge_queue_produce().
 */
xge_queue_status_e
xge_queue_consume(xge_queue_h queueh, int data_max_size, xge_queue_item_t *item)
{
	xge_queue_t *queue = (xge_queue_t *)queueh;
	unsigned long flags = 0;
	xge_queue_status_e status;

	xge_os_spin_lock_irq(&queue->lock, flags);
	status = __queue_consume(queue, data_max_size, item);
	xge_os_spin_unlock_irq(&queue->lock, flags);

	return status;
}
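
/*
 * Consumer sketch (illustrative; buffer sizing mirrors xge_queue_flush()):
 *
 *	unsigned char buf[sizeof(xge_queue_item_t) +
 *			XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
 *	xge_queue_item_t *it = (xge_queue_item_t *)(void *)buf;
 *
 *	while (xge_queue_consume(queueh, XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
 *			it) == XGE_QUEUE_OK) {
 *		... dispatch on it->event_type; the payload starts at
 *		    xge_queue_item_data(it) and is it->data_size bytes ...
 *	}
 */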


/**
 * xge_queue_flush - Flush, or empty, the queue.
 * @queueh: Queue handle.
 *
 * Flush the queue, i.e. make it empty by consuming all events
 * without invoking the event processing logic (callbacks, etc.).
 */
void xge_queue_flush(xge_queue_h queueh)
{
	unsigned char item_buf[sizeof(xge_queue_item_t) +
				XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
	xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;

	/* flush queue by consuming all enqueued items */
	while (xge_queue_consume(queueh,
				    XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
				    item) != XGE_QUEUE_IS_EMPTY) {
		xge_debug_queue(XGE_TRACE, "item 0x"XGE_OS_LLXFMT"(%d) flushed",
				 (u64)(ulong_t)item, item->event_type);
	}
	(void) __queue_get_reset_critical(queueh);
}

/*
 * __queue_get_reset_critical - Check for critical events in the queue.
 * @qh: Queue handle.
 *
 * Check for critical event(s) in the queue, and reset the
 * "has-critical-event" flag upon return.
 * Returns: 1 - if the queue contains at least one critical event.
 * 0 - If there are no critical events in the queue.
 */
int __queue_get_reset_critical(xge_queue_h qh)
{
	xge_queue_t *queue = (xge_queue_t *)qh;
	int c = queue->has_critical_event;

	queue->has_critical_event = 0;
	return c;
}
441