xref: /illumos-gate/usr/src/uts/common/io/xge/hal/xgehal/xge-queue.c (revision a07094369b21309434206d9b3601d162693466fc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  *  Copyright (c) 2002-2005 Neterion, Inc.
24  *  All rights reserved.
25  *
26  *  FileName :    xge-queue.c
27  *
28  *  Description:  serialized event queue
29  *
30  *  Created:      7 June 2004
31  */
32 
33 #include "xge-queue.h"
34 
35 /**
36  * xge_queue_item_data - Get item's data.
37  * @item: Queue item.
38  *
39  * Returns:  item data(variable size). Note that xge_queue_t
40  * contains items comprized of a fixed xge_queue_item_t "header"
41  * and a variable size data. This function returns the variable
42  * user-defined portion of the queue item.
43  */
44 void* xge_queue_item_data(xge_queue_item_t *item)
45 {
46 	return (char *)item + sizeof(xge_queue_item_t);
47 }
48 
/*
 * __queue_consume - (Lockless) dequeue an item from the specified queue.
 *
 * @queue: Event queue.
 * @data_max_size: Maximum size (in bytes) of user data the caller can
 *                 accept in @item.
 * @item: Caller-supplied buffer the dequeued item (fixed header plus
 *        variable payload) is _copied_ into.
 *
 * NOTE(review): no locking is performed here; callers are expected to
 * hold queue->lock (see xge_queue_consume() and xge_queue_produce()).
 *
 * Returns: XGE_QUEUE_OK - success.
 * XGE_QUEUE_IS_EMPTY - Queue is empty.
 * XGE_QUEUE_NOT_ENOUGH_SPACE - @data_max_size is smaller than the
 * item's data_size.
 *
 * See xge_queue_consume().
 */
static xge_queue_status_e
__queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
{
	int real_size;
	xge_queue_item_t *elem;

	if (xge_list_is_empty(&queue->list_head))
		return XGE_QUEUE_IS_EMPTY;

	/* the oldest item is always first on the list */
	elem = (xge_queue_item_t *)queue->list_head.next;
	if (elem->data_size > data_max_size)
		return XGE_QUEUE_NOT_ENOUGH_SPACE;

	xge_list_remove(&elem->item);
	/* total buffer footprint: fixed header plus variable payload */
	real_size = elem->data_size + sizeof(xge_queue_item_t);
	if (queue->head_ptr == elem) {
		/* item sits at the front of the buffer: reclaim its
		 * space by advancing head_ptr */
		queue->head_ptr = (char *)queue->head_ptr + real_size;
		xge_debug_queue(XGE_TRACE,
			"event_type: %d removing from the head: "
			"0x%llx:0x%llx:0x%llx:0x%llx elem 0x%llx length %d",
			elem->event_type,
			(u64)(ulong_t)queue->start_ptr,
			(u64)(ulong_t)queue->head_ptr,
			(u64)(ulong_t)queue->tail_ptr,
			(u64)(ulong_t)queue->end_ptr,
			(u64)(ulong_t)elem,
			real_size);
	} else if ((char *)queue->tail_ptr - real_size == (char*)elem) {
		/* item is the last one in the buffer: pull tail_ptr back */
		queue->tail_ptr = (char *)queue->tail_ptr - real_size;
		xge_debug_queue(XGE_TRACE,
			"event_type: %d removing from the tail: "
			"0x%llx:0x%llx:0x%llx:0x%llx elem 0x%llx length %d",
			elem->event_type,
			(u64)(ulong_t)queue->start_ptr,
			(u64)(ulong_t)queue->head_ptr,
			(u64)(ulong_t)queue->tail_ptr,
			(u64)(ulong_t)queue->end_ptr,
			(u64)(ulong_t)elem,
			real_size);
	} else {
		/* item is in the middle of the buffer: its space cannot
		 * be reclaimed until surrounding items are consumed */
		xge_debug_queue(XGE_TRACE,
			"event_type: %d removing from the list: "
			"0x%llx:0x%llx:0x%llx:0x%llx elem 0x%llx length %d",
			elem->event_type,
			(u64)(ulong_t)queue->start_ptr,
			(u64)(ulong_t)queue->head_ptr,
			(u64)(ulong_t)queue->tail_ptr,
			(u64)(ulong_t)queue->end_ptr,
			(u64)(ulong_t)elem,
			real_size);
	}
	/* buffer-pointer invariants: start <= head <= tail <= end */
	xge_assert(queue->tail_ptr >= queue->head_ptr);
	xge_assert(queue->tail_ptr >= queue->start_ptr &&
		    queue->tail_ptr <= queue->end_ptr);
	xge_assert(queue->head_ptr >= queue->start_ptr &&
		    queue->head_ptr < queue->end_ptr);
	/* copy fixed header, then variable payload, out to the caller */
	xge_os_memcpy(item, elem, sizeof(xge_queue_item_t));
	xge_os_memcpy(xge_queue_item_data(item), xge_queue_item_data(elem),
		    elem->data_size);

	if (xge_list_is_empty(&queue->list_head)) {
		/* reset buffer pointers just to be clean */
		queue->head_ptr = queue->tail_ptr = queue->start_ptr;
	}
	return XGE_QUEUE_OK;
}
121 
122 /**
123  * xge_queue_produce - Enqueue an item (see xge_queue_item_t{})
124  *                      into the specified queue.
125  * @queueh: Queue handle.
126  * @event_type: Event type. One of the enumerated event types
127  *              that both consumer and producer "understand".
128  *              For an example, please refer to xge_hal_event_e.
129  * @context: Opaque (void*) "context", for instance event producer object.
130  * @is_critical: For critical event, e.g. ECC.
131  * @data_size: Size of the @data.
132  * @data: User data of variable @data_size that is _copied_ into
133  *        the new queue item (see xge_queue_item_t{}). Upon return
134  *        from the call the @data memory can be re-used or released.
135  *
136  * Enqueue a new item.
137  *
138  * Returns: XGE_QUEUE_OK - success.
139  * XGE_QUEUE_IS_FULL - Queue is full.
140  * XGE_QUEUE_OUT_OF_MEMORY - Memory allocation failed.
141  *
142  * See also: xge_queue_item_t{}, xge_queue_consume().
143  */
144 xge_queue_status_e
145 xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
146 		int is_critical, const int data_size, void *data)
147 {
148 	xge_queue_t *queue = (xge_queue_t *)queueh;
149 	int real_size = data_size + sizeof(xge_queue_item_t);
150 	xge_queue_item_t *elem;
151 	unsigned long flags = 0;
152 
153 	xge_assert(real_size <= XGE_QUEUE_BUF_SIZE);
154 
155 	xge_os_spin_lock_irq(&queue->lock, flags);
156 
157 	if (is_critical && !queue->has_critical_event)  {
158 		unsigned char item_buf[sizeof(xge_queue_item_t) +
159 				XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
160 		xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
161 
162 	        while (__queue_consume(queue,
163 				       XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
164 				       item) != XGE_QUEUE_IS_EMPTY)
165 		        ; /* do nothing */
166 	}
167 
168 try_again:
169 	if ((char *)queue->tail_ptr + real_size <= (char *)queue->end_ptr) {
170 		elem = queue->tail_ptr;
171 		queue->tail_ptr = (void *)((char *)queue->tail_ptr + real_size);
172 		xge_debug_queue(XGE_TRACE,
173 			"event_type: %d adding to the tail: "
174 			"0x%llx:0x%llx:0x%llx:0x%llx elem 0x%llx length %d",
175 			event_type,
176 			(u64)(ulong_t)queue->start_ptr,
177 			(u64)(ulong_t)queue->head_ptr,
178 			(u64)(ulong_t)queue->tail_ptr,
179 			(u64)(ulong_t)queue->end_ptr,
180 			(u64)(ulong_t)elem,
181 			real_size);
182 	} else if ((char *)queue->head_ptr - real_size >=
183 					(char *)queue->start_ptr) {
184 		elem = (void *)((char *)queue->head_ptr - real_size);
185 		queue->head_ptr = elem;
186 		xge_debug_queue(XGE_TRACE,
187 			"event_type: %d adding to the head: "
188 			"0x%llx:0x%llx:0x%llx:0x%llx length %d",
189 			event_type,
190 			(u64)(ulong_t)queue->start_ptr,
191 			(u64)(ulong_t)queue->head_ptr,
192 			(u64)(ulong_t)queue->tail_ptr,
193 			(u64)(ulong_t)queue->end_ptr,
194 			real_size);
195 	} else {
196 		xge_queue_status_e status;
197 
198 		if (queue->pages_current >= queue->pages_max) {
199 			xge_os_spin_unlock_irq(&queue->lock, flags);
200 			return XGE_QUEUE_IS_FULL;
201 		}
202 
203 		if (queue->has_critical_event)
204 			return XGE_QUEUE_IS_FULL;
205 
206 		/* grow */
207 		status = __io_queue_grow(queueh);
208 		if (status != XGE_QUEUE_OK) {
209 			xge_os_spin_unlock_irq(&queue->lock, flags);
210 			return status;
211 		}
212 
213 		goto try_again;
214 	}
215 	xge_assert(queue->tail_ptr >= queue->head_ptr);
216 	xge_assert(queue->tail_ptr >= queue->start_ptr &&
217 		    queue->tail_ptr <= queue->end_ptr);
218 	xge_assert(queue->head_ptr >= queue->start_ptr &&
219 		    queue->head_ptr < queue->end_ptr);
220 	elem->data_size = data_size;
221 	elem->event_type = event_type;
222 	elem->is_critical = is_critical;
223 	if (is_critical)
224 	        queue->has_critical_event = 1;
225 	elem->context = context;
226 	xge_os_memcpy(xge_queue_item_data(elem), data, data_size);
227 	xge_list_insert_before(&elem->item, &queue->list_head);
228 	xge_os_spin_unlock_irq(&queue->lock, flags);
229 
230 	/* no lock taken! */
231 	queue->queued_func(queue->queued_data, event_type);
232 
233 	return XGE_QUEUE_OK;
234 }
235 
236 
237 /**
238  * xge_queue_create - Create protected first-in-first-out queue.
239  * @pdev: PCI device handle.
240  * @irqh: PCI device IRQ handle.
241  * @pages_initial: Number of pages to be initially allocated at the
242  * time of queue creation.
243  * @pages_max: Max number of pages that can be allocated in the queue.
244  * @queued: Optional callback function to be called each time a new item is
245  * added to the queue.
246  * @queued_data: Argument to the callback function.
247  *
248  * Create protected (fifo) queue.
249  *
250  * Returns: Pointer to xge_queue_t structure,
251  * NULL - on failure.
252  *
253  * See also: xge_queue_item_t{}, xge_queue_destroy().
254  */
255 xge_queue_h
256 xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
257 		int pages_max, xge_queued_f queued, void *queued_data)
258 {
259 	xge_queue_t *queue;
260 
261 	if ((queue = xge_os_malloc(pdev, sizeof(xge_queue_t))) == NULL)
262 		return NULL;
263 
264 	queue->queued_func = queued;
265 	queue->queued_data = queued_data;
266 	queue->pdev = pdev;
267 	queue->irqh = irqh;
268 	queue->pages_current = pages_initial;
269 	queue->start_ptr = xge_os_malloc(pdev, queue->pages_current *
270 	                               XGE_QUEUE_BUF_SIZE);
271 	if (queue->start_ptr == NULL) {
272 		xge_os_free(pdev, queue, sizeof(xge_queue_t));
273 		return NULL;
274 	}
275 	queue->head_ptr = queue->tail_ptr = queue->start_ptr;
276 	queue->end_ptr = (char *)queue->start_ptr +
277 		queue->pages_current * XGE_QUEUE_BUF_SIZE;
278 	xge_os_spin_lock_init_irq(&queue->lock, irqh);
279 	queue->pages_initial = pages_initial;
280 	queue->pages_max = pages_max;
281 	xge_list_init(&queue->list_head);
282 
283 	return queue;
284 }
285 
286 /**
287  * xge_queue_destroy - Destroy xge_queue_t object.
288  * @queueh: Queue handle.
289  *
290  * Destroy the specified xge_queue_t object.
291  *
292  * See also: xge_queue_item_t{}, xge_queue_create().
293  */
294 void xge_queue_destroy(xge_queue_h queueh)
295 {
296 	xge_queue_t *queue = (xge_queue_t *)queueh;
297 	if (!xge_list_is_empty(&queue->list_head)) {
298 		xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x%llx",
299 				 (u64)(ulong_t)queue);
300 	}
301 	xge_os_free(queue->pdev, queue->start_ptr, queue->pages_current *
302 	          XGE_QUEUE_BUF_SIZE);
303 
304 	xge_os_free(queue->pdev, queue, sizeof(xge_queue_t));
305 }
306 
/*
 * __io_queue_grow - Dynamically increases the size of the queue.
 * @queueh: Queue handle.
 *
 * This function is called in the case of no slot available in the queue
 * to accommodate the newly received event.
 * Note that queue cannot grow beyond the max size specified for the
 * queue.
 *
 * NOTE(review): caller is expected to hold queue->lock (the only caller
 * in this file, xge_queue_produce(), does) — confirm before adding
 * other call sites.
 *
 * Returns XGE_QUEUE_OK: On success.
 * XGE_QUEUE_OUT_OF_MEMORY : No memory is available.
 */
xge_queue_status_e
__io_queue_grow(xge_queue_h queueh)
{
	xge_queue_t *queue = (xge_queue_t *)queueh;
	void *newbuf, *oldbuf;
	xge_list_t *item;
	xge_queue_item_t *elem;

	xge_debug_queue(XGE_TRACE, "queue 0x%llx:%d is growing",
			 (u64)(ulong_t)queue, queue->pages_current);

	/* allocate a buffer one page larger than the current one */
	newbuf = xge_os_malloc(queue->pdev,
	        (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE);
	if (newbuf == NULL)
		return XGE_QUEUE_OUT_OF_MEMORY;

	xge_os_memcpy(newbuf, queue->start_ptr,
	       queue->pages_current * XGE_QUEUE_BUF_SIZE);
	oldbuf = queue->start_ptr;

	/* adjust queue sizes: rebase buffer pointers into the new buffer
	 * by preserving each pointer's byte offset from the old base */
	queue->start_ptr = newbuf;
	queue->end_ptr = (char *)newbuf +
			(queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE;
	queue->tail_ptr = (char *)newbuf + ((char *)queue->tail_ptr -
					    (char *)oldbuf);
	queue->head_ptr = (char *)newbuf + ((char *)queue->head_ptr -
					    (char *)oldbuf);
	/* grow is only reached when the queue is full, hence non-empty */
	xge_assert(!xge_list_is_empty(&queue->list_head));
	queue->list_head.next = (xge_list_t *) (void *)((char *)newbuf +
			((char *)queue->list_head.next - (char *)oldbuf));
	queue->list_head.prev = (xge_list_t *) (void *)((char *)newbuf +
			((char *)queue->list_head.prev - (char *)oldbuf));
	/* adjust queue list: rebase every element's embedded next/prev
	 * links; links that point at the list head itself (which lives
	 * in xge_queue_t, not in the relocated buffer) stay untouched */
	xge_list_for_each(item, &queue->list_head) {
		elem = xge_container_of(item, xge_queue_item_t, item);
		if (elem->item.next != &queue->list_head) {
			elem->item.next =
				(xge_list_t*)(void *)((char *)newbuf +
				 ((char *)elem->item.next - (char *)oldbuf));
		}
		if (elem->item.prev != &queue->list_head) {
			elem->item.prev =
				(xge_list_t*) (void *)((char *)newbuf +
				 ((char *)elem->item.prev - (char *)oldbuf));
		}
	}
	xge_os_free(queue->pdev, oldbuf,
		  queue->pages_current * XGE_QUEUE_BUF_SIZE);
	queue->pages_current++;

	return XGE_QUEUE_OK;
}
372 
373 /**
374  * xge_queue_consume - Dequeue an item from the specified queue.
375  * @queueh: Queue handle.
376  * @data_max_size: Maximum expected size of the item.
377  * @item: Memory area into which the item is _copied_ upon return
378  *        from the function.
379  *
380  * Dequeue an item from the queue. The caller is required to provide
381  * enough space for the item.
382  *
383  * Returns: XGE_QUEUE_OK - success.
384  * XGE_QUEUE_IS_EMPTY - Queue is empty.
385  * XGE_QUEUE_NOT_ENOUGH_SPACE - Requested item size(@data_max_size)
386  * is too small to accomodate an item from the queue.
387  *
388  * See also: xge_queue_item_t{}, xge_queue_produce().
389  */
390 xge_queue_status_e
391 xge_queue_consume(xge_queue_h queueh, int data_max_size, xge_queue_item_t *item)
392 {
393 	xge_queue_t *queue = (xge_queue_t *)queueh;
394 	unsigned long flags = 0;
395 	xge_queue_status_e status;
396 
397 	xge_os_spin_lock_irq(&queue->lock, flags);
398 	status = __queue_consume(queue, data_max_size, item);
399 	xge_os_spin_unlock_irq(&queue->lock, flags);
400 
401 	return status;
402 }
403 
404 
405 /**
406  * xge_queue_flush - Flush, or empty, the queue.
407  * @queueh: Queue handle.
408  *
409  * Flush the queue, i.e. make it empty by consuming all events
410  * without invoking the event processing logic (callbacks, etc.)
411  */
412 void xge_queue_flush(xge_queue_h queueh)
413 {
414 	unsigned char item_buf[sizeof(xge_queue_item_t) +
415 				XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
416 	xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
417 
418 	/* flush queue by consuming all enqueued items */
419 	while (xge_queue_consume(queueh,
420 				    XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
421 				    item) != XGE_QUEUE_IS_EMPTY) {
422 		/* do nothing */
423 		xge_debug_queue(XGE_TRACE, "item %llx(%d) flushed",
424 				 item, item->event_type);
425 	}
426 	(void) __queue_get_reset_critical (queueh);
427 }
428 
429 /*
430  * __queue_get_reset_critical - Check for critical events in the queue,
431  * @qh: Queue handle.
432  *
433  * Check for critical event(s) in the queue, and reset the
434  * "has-critical-event" flag upon return.
435  * Returns: 1 - if the queue contains atleast one critical event.
436  * 0 - If there are no critical events in the queue.
437  */
438 int __queue_get_reset_critical (xge_queue_h qh) {
439 	xge_queue_t* queue = (xge_queue_t*)qh;
440 	int c = queue->has_critical_event;
441 
442 	queue->has_critical_event = 0;
443         return c;
444 }
445