xref: /illumos-gate/usr/src/uts/common/os/errorq.c (revision bbf215553c7233fbab8a0afdf1fac74c44781867)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5d624471bSelowe  * Common Development and Distribution License (the "License").
6d624471bSelowe  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22267b64d5SStephen Hanson  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate /*
277c478bd9Sstevel@tonic-gate  * Kernel Error Queues
287c478bd9Sstevel@tonic-gate  *
297c478bd9Sstevel@tonic-gate  * A common problem when handling hardware error traps and interrupts is that
307c478bd9Sstevel@tonic-gate  * these errors frequently must be handled at high interrupt level, where
317c478bd9Sstevel@tonic-gate  * reliably producing error messages and safely examining and manipulating
327c478bd9Sstevel@tonic-gate  * other kernel state may not be possible.  The kernel error queue primitive is
337c478bd9Sstevel@tonic-gate  * a common set of routines that allow a subsystem to maintain a queue of
347c478bd9Sstevel@tonic-gate  * errors that can be processed by an explicit call from a safe context or by a
357c478bd9Sstevel@tonic-gate  * soft interrupt that fires at a specific lower interrupt level.  The queue
367c478bd9Sstevel@tonic-gate  * management code also ensures that if the system panics, all in-transit
377c478bd9Sstevel@tonic-gate  * errors are logged prior to reset.  Each queue has an associated kstat for
387c478bd9Sstevel@tonic-gate  * observing the number of errors dispatched and logged, and mdb(1) debugging
397c478bd9Sstevel@tonic-gate  * support is provided for live and post-mortem observability.
407c478bd9Sstevel@tonic-gate  *
417c478bd9Sstevel@tonic-gate  * Memory Allocation
427c478bd9Sstevel@tonic-gate  *
437c478bd9Sstevel@tonic-gate  * 	All of the queue data structures are allocated in advance as part of
447c478bd9Sstevel@tonic-gate  * 	the errorq_create() call.  No additional memory allocations are
457c478bd9Sstevel@tonic-gate  * 	performed as part of errorq_dispatch(), errorq_reserve(),
467c478bd9Sstevel@tonic-gate  *	errorq_commit() or errorq_drain().  This design
477c478bd9Sstevel@tonic-gate  * 	facilitates reliable error queue processing even when the system is low
487c478bd9Sstevel@tonic-gate  * 	on memory, and ensures that errorq_dispatch() can be called from any
497c478bd9Sstevel@tonic-gate  * 	context.  When the queue is created, the maximum queue length is
50267b64d5SStephen Hanson  * 	specified as a parameter to errorq_create() and errorq_nvcreate().  This
517c478bd9Sstevel@tonic-gate  *	length should represent a reasonable upper bound on the number of
527c478bd9Sstevel@tonic-gate  *	simultaneous errors.  If errorq_dispatch() or errorq_reserve() is
537c478bd9Sstevel@tonic-gate  *	invoked and no free queue elements are available, the error is
547c478bd9Sstevel@tonic-gate  *	dropped and will not be logged.  Typically, the queue will only be
557c478bd9Sstevel@tonic-gate  *	exhausted by an error storm, and in this case
567c478bd9Sstevel@tonic-gate  * 	the earlier errors provide the most important data for analysis.
577c478bd9Sstevel@tonic-gate  * 	When a new error is dispatched, the error data is copied into the
587c478bd9Sstevel@tonic-gate  * 	preallocated queue element so that the caller's buffer can be reused.
597c478bd9Sstevel@tonic-gate  *
60267b64d5SStephen Hanson  *	When a new error is reserved, an element is moved from the free pool
617c478bd9Sstevel@tonic-gate  *	and returned to the caller.  The element buffer data, eqe_data, may be
627c478bd9Sstevel@tonic-gate  *	managed by the caller and dispatched to the errorq by calling
637c478bd9Sstevel@tonic-gate  *	errorq_commit().  This is useful for additions to errorq's
647c478bd9Sstevel@tonic-gate  *	created with errorq_nvcreate() to handle name-value pair (nvpair) data.
657c478bd9Sstevel@tonic-gate  *	See below for a discussion on nvlist errorq's.
667c478bd9Sstevel@tonic-gate  *
677c478bd9Sstevel@tonic-gate  * Queue Drain Callback
687c478bd9Sstevel@tonic-gate  *
697c478bd9Sstevel@tonic-gate  *      When the error queue is drained, the caller's queue drain callback is
707c478bd9Sstevel@tonic-gate  *      invoked with a pointer to the saved error data.  This function may be
717c478bd9Sstevel@tonic-gate  *      called from passive kernel context or soft interrupt context at or
727c478bd9Sstevel@tonic-gate  *      below LOCK_LEVEL, or as part of panic().  As such, the callback should
737c478bd9Sstevel@tonic-gate  *      basically only be calling cmn_err (but NOT with the CE_PANIC flag).
747c478bd9Sstevel@tonic-gate  *      The callback must not call panic(), attempt to allocate memory, or wait
757c478bd9Sstevel@tonic-gate  *      on a condition variable.  The callback may not call errorq_destroy()
767c478bd9Sstevel@tonic-gate  *      or errorq_drain() on the same error queue that called it.
777c478bd9Sstevel@tonic-gate  *
787c478bd9Sstevel@tonic-gate  *      The queue drain callback will always be called for each pending error
797c478bd9Sstevel@tonic-gate  *      in the order in which errors were enqueued (oldest to newest).  The
807c478bd9Sstevel@tonic-gate  *      queue drain callback is guaranteed to provide at *least* once semantics
817c478bd9Sstevel@tonic-gate  *      for all errors that are successfully dispatched (i.e. for which
827c478bd9Sstevel@tonic-gate  *      errorq_dispatch() has successfully completed).  If an unrelated panic
837c478bd9Sstevel@tonic-gate  *      occurs while the queue drain callback is running on a vital queue, the
847c478bd9Sstevel@tonic-gate  *      panic subsystem will continue the queue drain and the callback may be
857c478bd9Sstevel@tonic-gate  *      invoked again for the same error.  Therefore, the callback should
867c478bd9Sstevel@tonic-gate  *      restrict itself to logging messages and taking other actions that are
877c478bd9Sstevel@tonic-gate  *      not destructive if repeated.
887c478bd9Sstevel@tonic-gate  *
897c478bd9Sstevel@tonic-gate  * Name-Value Pair Error Queues
907c478bd9Sstevel@tonic-gate  *
917c478bd9Sstevel@tonic-gate  *	During error handling, it may be more convenient to store error
927c478bd9Sstevel@tonic-gate  *	queue element data as a fixed buffer of name-value pairs.  The
93267b64d5SStephen Hanson  *	nvpair library allows construction and destruction of nvlists
947c478bd9Sstevel@tonic-gate  *	in pre-allocated memory buffers.
957c478bd9Sstevel@tonic-gate  *
967c478bd9Sstevel@tonic-gate  *	Error queues created via errorq_nvcreate() store queue element
977c478bd9Sstevel@tonic-gate  *	data as fixed buffer nvlists (ereports).  errorq_reserve()
98267b64d5SStephen Hanson  *	allocates an errorq element from eqp->eq_bitmap and returns a valid
997c478bd9Sstevel@tonic-gate  *	pointer	to a errorq_elem_t (queue element) and a pre-allocated
1007c478bd9Sstevel@tonic-gate  *	fixed buffer nvlist.  errorq_elem_nvl() is used to gain access
1017c478bd9Sstevel@tonic-gate  *	to the nvlist to add name-value ereport members prior to
1027c478bd9Sstevel@tonic-gate  *	dispatching the error queue element in errorq_commit().
1037c478bd9Sstevel@tonic-gate  *
1047c478bd9Sstevel@tonic-gate  *	Once dispatched, the drain function will return the element to
105267b64d5SStephen Hanson  *	eqp->eq_bitmap and reset the associated nv_alloc structure.
1067c478bd9Sstevel@tonic-gate  *	errorq_cancel() may be called to cancel an element reservation
1077c478bd9Sstevel@tonic-gate  *	that was never dispatched (committed).  This is useful in
1087c478bd9Sstevel@tonic-gate  *	cases where a programming error prevents a queue element from being
1097c478bd9Sstevel@tonic-gate  *	dispatched.
1107c478bd9Sstevel@tonic-gate  *
1117c478bd9Sstevel@tonic-gate  * Queue Management
1127c478bd9Sstevel@tonic-gate  *
1137c478bd9Sstevel@tonic-gate  *      The queue element structures and error data buffers are allocated in
1147c478bd9Sstevel@tonic-gate  *      two contiguous chunks as part of errorq_create() or errorq_nvcreate().
1157c478bd9Sstevel@tonic-gate  *	Each queue element structure contains a next pointer,
1167c478bd9Sstevel@tonic-gate  *	a previous pointer, and a pointer to the corresponding error data
1177c478bd9Sstevel@tonic-gate  *	buffer.  The data buffer for a nvlist errorq is a shared buffer
1187c478bd9Sstevel@tonic-gate  *	for the allocation of name-value pair lists. The elements are kept on
119267b64d5SStephen Hanson  *      one of four lists:
1207c478bd9Sstevel@tonic-gate  *
121267b64d5SStephen Hanson  *	Unused elements are kept in the free pool, managed by eqp->eq_bitmap.
122267b64d5SStephen Hanson  *	The eqe_prev and eqe_next pointers are not used while in the free pool
123267b64d5SStephen Hanson  *	and will be set to NULL.
1247c478bd9Sstevel@tonic-gate  *
1257c478bd9Sstevel@tonic-gate  *      Pending errors are kept on the pending list, a singly-linked list
1267c478bd9Sstevel@tonic-gate  *      pointed to by eqp->eq_pend, and linked together using eqe_prev.  This
1277c478bd9Sstevel@tonic-gate  *      list is maintained in order from newest error to oldest.  The eqe_next
1287c478bd9Sstevel@tonic-gate  *      pointer is not used by the pending list and will be set to NULL.
1297c478bd9Sstevel@tonic-gate  *
1307c478bd9Sstevel@tonic-gate  *      The processing list is a doubly-linked list pointed to by eqp->eq_phead
1317c478bd9Sstevel@tonic-gate  *      (the oldest element) and eqp->eq_ptail (the newest element).  The
1327c478bd9Sstevel@tonic-gate  *      eqe_next pointer is used to traverse from eq_phead to eq_ptail, and the
1337c478bd9Sstevel@tonic-gate  *      eqe_prev pointer is used to traverse from eq_ptail to eq_phead.  Once a
1347c478bd9Sstevel@tonic-gate  *      queue drain operation begins, the current pending list is moved to the
135267b64d5SStephen Hanson  *      processing list in a two-phase commit fashion (eq_ptail being cleared
136267b64d5SStephen Hanson  *	at the beginning but eq_phead only at the end), allowing the panic code
1377c478bd9Sstevel@tonic-gate  *      to always locate and process all pending errors in the event that a
1387c478bd9Sstevel@tonic-gate  *      panic occurs in the middle of queue processing.
1397c478bd9Sstevel@tonic-gate  *
1407c478bd9Sstevel@tonic-gate  *	A fourth list is maintained for nvlist errorqs.  The dump list,
1417c478bd9Sstevel@tonic-gate  *	eq_dump is used to link all errorq elements that should be stored
1427c478bd9Sstevel@tonic-gate  *	in a crash dump file in the event of a system panic.  During
1437c478bd9Sstevel@tonic-gate  *	errorq_panic(), the list is created and subsequently traversed
1447c478bd9Sstevel@tonic-gate  *	in errorq_dump() during the final phases of a crash dump.
1457c478bd9Sstevel@tonic-gate  *
1467c478bd9Sstevel@tonic-gate  * Platform Considerations
1477c478bd9Sstevel@tonic-gate  *
1487c478bd9Sstevel@tonic-gate  *      In order to simplify their implementation, error queues make use of the
1497c478bd9Sstevel@tonic-gate  *      C wrappers for compare-and-swap.  If the platform itself does not
1507c478bd9Sstevel@tonic-gate  *      support compare-and-swap in hardware and the kernel emulation routines
1517c478bd9Sstevel@tonic-gate  *      are used instead, then the context in which errorq_dispatch() can be
1527c478bd9Sstevel@tonic-gate  *      safely invoked is further constrained by the implementation of the
1537c478bd9Sstevel@tonic-gate  *      compare-and-swap emulation.  Specifically, if errorq_dispatch() is
1547c478bd9Sstevel@tonic-gate  *      called from a code path that can be executed above ATOMIC_LEVEL on such
1557c478bd9Sstevel@tonic-gate  *      a platform, the dispatch code could potentially deadlock unless the
1567c478bd9Sstevel@tonic-gate  *      corresponding error interrupt is blocked or disabled prior to calling
1577c478bd9Sstevel@tonic-gate  *      errorq_dispatch().  Error queues should therefore be deployed with
1587c478bd9Sstevel@tonic-gate  *      caution on these platforms.
1597c478bd9Sstevel@tonic-gate  *
1607c478bd9Sstevel@tonic-gate  * Interfaces
1617c478bd9Sstevel@tonic-gate  *
1627c478bd9Sstevel@tonic-gate  * errorq_t *errorq_create(name, func, private, qlen, eltsize, ipl, flags);
1637c478bd9Sstevel@tonic-gate  * errorq_t *errorq_nvcreate(name, func, private, qlen, eltsize, ipl, flags);
1647c478bd9Sstevel@tonic-gate  *
1657c478bd9Sstevel@tonic-gate  *      Create a new error queue with the specified name, callback, and
1667c478bd9Sstevel@tonic-gate  *      properties.  A pointer to the new error queue is returned upon success,
1677c478bd9Sstevel@tonic-gate  *      or NULL is returned to indicate that the queue could not be created.
1687c478bd9Sstevel@tonic-gate  *      This function must be called from passive kernel context with no locks
1697c478bd9Sstevel@tonic-gate  *      held that can prevent a sleeping memory allocation from occurring.
1707c478bd9Sstevel@tonic-gate  *      errorq_create() will return failure if the queue kstats cannot be
1717c478bd9Sstevel@tonic-gate  *      created, or if a soft interrupt handler cannot be registered.
1727c478bd9Sstevel@tonic-gate  *
1737c478bd9Sstevel@tonic-gate  *      The queue 'name' is a string that is recorded for live and post-mortem
1747c478bd9Sstevel@tonic-gate  *      examination by a debugger.  The queue callback 'func' will be invoked
1757c478bd9Sstevel@tonic-gate  *      for each error drained from the queue, and will receive the 'private'
1767c478bd9Sstevel@tonic-gate  *      pointer as its first argument.  The callback must obey the rules for
1777c478bd9Sstevel@tonic-gate  *      callbacks described above.  The queue will have maximum length 'qlen'
1787c478bd9Sstevel@tonic-gate  *      and each element will be able to record up to 'eltsize' bytes of data.
1797c478bd9Sstevel@tonic-gate  *      The queue's soft interrupt (see errorq_dispatch(), below) will fire
1807c478bd9Sstevel@tonic-gate  *      at 'ipl', which should not exceed LOCK_LEVEL.  The queue 'flags' may
1817c478bd9Sstevel@tonic-gate  *      include the following flag:
1827c478bd9Sstevel@tonic-gate  *
1837c478bd9Sstevel@tonic-gate  *      ERRORQ_VITAL    - This queue contains information that is considered
1847c478bd9Sstevel@tonic-gate  *         vital to problem diagnosis.  Error queues that are marked vital will
1857c478bd9Sstevel@tonic-gate  *         be automatically drained by the panic subsystem prior to printing
1867c478bd9Sstevel@tonic-gate  *         the panic messages to the console.
1877c478bd9Sstevel@tonic-gate  *
1887c478bd9Sstevel@tonic-gate  * void errorq_destroy(errorq);
1897c478bd9Sstevel@tonic-gate  *
1907c478bd9Sstevel@tonic-gate  *      Destroy the specified error queue.  The queue is drained of any
1917c478bd9Sstevel@tonic-gate  *      pending elements and these are logged before errorq_destroy returns.
1927c478bd9Sstevel@tonic-gate  *      Once errorq_destroy() begins draining the queue, any simultaneous
1937c478bd9Sstevel@tonic-gate  *      calls to dispatch errors will result in the errors being dropped.
1947c478bd9Sstevel@tonic-gate  *      The caller must invoke a higher-level abstraction (e.g. disabling
1957c478bd9Sstevel@tonic-gate  *      an error interrupt) to ensure that error handling code does not
1967c478bd9Sstevel@tonic-gate  *      attempt to dispatch errors to the queue while it is being freed.
1977c478bd9Sstevel@tonic-gate  *
1987c478bd9Sstevel@tonic-gate  * void errorq_dispatch(errorq, data, len, flag);
1997c478bd9Sstevel@tonic-gate  *
2007c478bd9Sstevel@tonic-gate  *      Attempt to enqueue the specified error data.  If a free queue element
2017c478bd9Sstevel@tonic-gate  *      is available, the data is copied into a free element and placed on a
2027c478bd9Sstevel@tonic-gate  *      pending list.  If no free queue element is available, the error is
2037c478bd9Sstevel@tonic-gate  *      dropped.  The data length (len) is specified in bytes and should not
2047c478bd9Sstevel@tonic-gate  *      exceed the queue's maximum element size.  If the data length is less
2057c478bd9Sstevel@tonic-gate  *      than the maximum element size, the remainder of the queue element is
2067c478bd9Sstevel@tonic-gate  *      filled with zeroes.  The flag parameter should be one of:
2077c478bd9Sstevel@tonic-gate  *
2087c478bd9Sstevel@tonic-gate  *      ERRORQ_ASYNC    - Schedule a soft interrupt at the previously specified
2097c478bd9Sstevel@tonic-gate  *         IPL to asynchronously drain the queue on behalf of the caller.
2107c478bd9Sstevel@tonic-gate  *
2117c478bd9Sstevel@tonic-gate  *      ERRORQ_SYNC     - Do not schedule a soft interrupt to drain the queue.
2127c478bd9Sstevel@tonic-gate  *         The caller is presumed to be calling errorq_drain() or panic() in
2137c478bd9Sstevel@tonic-gate  *         the near future in order to drain the queue and log the error.
2147c478bd9Sstevel@tonic-gate  *
2157c478bd9Sstevel@tonic-gate  *      The errorq_dispatch() function may be called from any context, subject
2167c478bd9Sstevel@tonic-gate  *      to the Platform Considerations described above.
2177c478bd9Sstevel@tonic-gate  *
2187c478bd9Sstevel@tonic-gate  * void errorq_drain(errorq);
2197c478bd9Sstevel@tonic-gate  *
2207c478bd9Sstevel@tonic-gate  *      Drain the error queue of all pending errors.  The queue's callback
2217c478bd9Sstevel@tonic-gate  *      function is invoked for each error in order from oldest to newest.
2227c478bd9Sstevel@tonic-gate  *      This function may be used at or below LOCK_LEVEL or from panic context.
2237c478bd9Sstevel@tonic-gate  *
2247c478bd9Sstevel@tonic-gate  * errorq_elem_t *errorq_reserve(errorq);
2257c478bd9Sstevel@tonic-gate  *
2267c478bd9Sstevel@tonic-gate  *	Reserve an error queue element for later processing and dispatching.
2277c478bd9Sstevel@tonic-gate  *	The element is returned to the caller who may add error-specific data
228267b64d5SStephen Hanson  *	to the element.  The element is returned to the free pool when either
2297c478bd9Sstevel@tonic-gate  *	errorq_commit() is called and the element asynchronously processed
2307c478bd9Sstevel@tonic-gate  *	or immediately when errorq_cancel() is called.
2317c478bd9Sstevel@tonic-gate  *
2327c478bd9Sstevel@tonic-gate  * void errorq_commit(errorq, errorq_elem, flag);
2337c478bd9Sstevel@tonic-gate  *
2347c478bd9Sstevel@tonic-gate  *	Commit an errorq element (eqep) for dispatching, see
2357c478bd9Sstevel@tonic-gate  *	errorq_dispatch().
2367c478bd9Sstevel@tonic-gate  *
2377c478bd9Sstevel@tonic-gate  * void errorq_cancel(errorq, errorq_elem);
2387c478bd9Sstevel@tonic-gate  *
2397c478bd9Sstevel@tonic-gate  *	Cancel a pending errorq element reservation.  The errorq element is
240267b64d5SStephen Hanson  *	returned to the free pool upon cancellation.
2417c478bd9Sstevel@tonic-gate  */
2427c478bd9Sstevel@tonic-gate 
2437c478bd9Sstevel@tonic-gate #include <sys/errorq_impl.h>
2447c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
2457c478bd9Sstevel@tonic-gate #include <sys/machlock.h>
2467c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
2477c478bd9Sstevel@tonic-gate #include <sys/atomic.h>
2487c478bd9Sstevel@tonic-gate #include <sys/systm.h>
2497c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
2507c478bd9Sstevel@tonic-gate #include <sys/conf.h>
2517c478bd9Sstevel@tonic-gate #include <sys/ddi.h>
2527c478bd9Sstevel@tonic-gate #include <sys/sunddi.h>
2537c478bd9Sstevel@tonic-gate #include <sys/bootconf.h>
2547c478bd9Sstevel@tonic-gate #include <sys/spl.h>
2557c478bd9Sstevel@tonic-gate #include <sys/dumphdr.h>
2567c478bd9Sstevel@tonic-gate #include <sys/compress.h>
2577c478bd9Sstevel@tonic-gate #include <sys/time.h>
2587c478bd9Sstevel@tonic-gate #include <sys/panic.h>
259267b64d5SStephen Hanson #include <sys/bitmap.h>
2607c478bd9Sstevel@tonic-gate #include <sys/fm/protocol.h>
2617c478bd9Sstevel@tonic-gate #include <sys/fm/util.h>
2627c478bd9Sstevel@tonic-gate 
/*
 * Template for the per-queue named kstats.  errorq_create() copies this
 * template into each queue's eq_kstat and exports it through a virtual
 * kstat (eq_ksp) whose ks_data points at that per-queue copy.
 */
2637c478bd9Sstevel@tonic-gate static struct errorq_kstat errorq_kstat_template = {
2647c478bd9Sstevel@tonic-gate 	{ "dispatched", KSTAT_DATA_UINT64 },
2657c478bd9Sstevel@tonic-gate 	{ "dropped", KSTAT_DATA_UINT64 },
2667c478bd9Sstevel@tonic-gate 	{ "logged", KSTAT_DATA_UINT64 },
2677c478bd9Sstevel@tonic-gate 	{ "reserved", KSTAT_DATA_UINT64 },
2687c478bd9Sstevel@tonic-gate 	{ "reserve_fail", KSTAT_DATA_UINT64 },
2697c478bd9Sstevel@tonic-gate 	{ "committed", KSTAT_DATA_UINT64 },
2707c478bd9Sstevel@tonic-gate 	{ "commit_fail", KSTAT_DATA_UINT64 },
2717c478bd9Sstevel@tonic-gate 	{ "cancelled", KSTAT_DATA_UINT64 }
2727c478bd9Sstevel@tonic-gate };
2737c478bd9Sstevel@tonic-gate 
/* presumably a global tally of errors dropped for lack of a free element
 * (for debugger inspection) — TODO confirm against dispatch code below. */
2747c478bd9Sstevel@tonic-gate static uint64_t errorq_lost = 0;
/* Head of the global singly-linked list of all queues (see errorq_create()). */
2757c478bd9Sstevel@tonic-gate static errorq_t *errorq_list = NULL;
/* Protects errorq_list; taken in errorq_create() and errorq_destroy(). */
2767c478bd9Sstevel@tonic-gate static kmutex_t errorq_lock;
/* NOTE(review): appears to be a minimum element count for vital queues —
 * its consumer is not visible in this chunk; verify against errorq_panic(). */
2777c478bd9Sstevel@tonic-gate static uint64_t errorq_vitalmin = 5;
2787c478bd9Sstevel@tonic-gate 
2797c478bd9Sstevel@tonic-gate static uint_t
errorq_intr(caddr_t eqp)2807c478bd9Sstevel@tonic-gate errorq_intr(caddr_t eqp)
2817c478bd9Sstevel@tonic-gate {
2827c478bd9Sstevel@tonic-gate 	errorq_drain((errorq_t *)eqp);
2837c478bd9Sstevel@tonic-gate 	return (DDI_INTR_CLAIMED);
2847c478bd9Sstevel@tonic-gate }
2857c478bd9Sstevel@tonic-gate 
2867c478bd9Sstevel@tonic-gate /*
2877c478bd9Sstevel@tonic-gate  * Create a new error queue with the specified properties and add a software
2887c478bd9Sstevel@tonic-gate  * interrupt handler and kstat for it.  This function must be called from
2897c478bd9Sstevel@tonic-gate  * passive kernel context with no locks held that can prevent a sleeping
2907c478bd9Sstevel@tonic-gate  * memory allocation from occurring.  This function will return NULL if the
2917c478bd9Sstevel@tonic-gate  * softint or kstat for this queue cannot be created.
2927c478bd9Sstevel@tonic-gate  */
2937c478bd9Sstevel@tonic-gate errorq_t *
errorq_create(const char * name,errorq_func_t func,void * private,ulong_t qlen,size_t size,uint_t ipl,uint_t flags)2947c478bd9Sstevel@tonic-gate errorq_create(const char *name, errorq_func_t func, void *private,
2957c478bd9Sstevel@tonic-gate     ulong_t qlen, size_t size, uint_t ipl, uint_t flags)
2967c478bd9Sstevel@tonic-gate {
2977c478bd9Sstevel@tonic-gate 	errorq_t *eqp = kmem_alloc(sizeof (errorq_t), KM_SLEEP);
2987c478bd9Sstevel@tonic-gate 	ddi_iblock_cookie_t ibc = (ddi_iblock_cookie_t)(uintptr_t)ipltospl(ipl);
2997c478bd9Sstevel@tonic-gate 	dev_info_t *dip = ddi_root_node();
3007c478bd9Sstevel@tonic-gate 
3017c478bd9Sstevel@tonic-gate 	errorq_elem_t *eep;
3027c478bd9Sstevel@tonic-gate 	ddi_softintr_t id = NULL;
3037c478bd9Sstevel@tonic-gate 	caddr_t data;
3047c478bd9Sstevel@tonic-gate 
3057c478bd9Sstevel@tonic-gate 	ASSERT(qlen != 0 && size != 0);
3067c478bd9Sstevel@tonic-gate 	ASSERT(ipl > 0 && ipl <= LOCK_LEVEL);
3077c478bd9Sstevel@tonic-gate 
3087c478bd9Sstevel@tonic-gate 	/*
3097c478bd9Sstevel@tonic-gate 	 * If a queue is created very early in boot before device tree services
3107c478bd9Sstevel@tonic-gate 	 * are available, the queue softint handler cannot be created.  We
3117c478bd9Sstevel@tonic-gate 	 * manually drain these queues and create their softint handlers when
3127c478bd9Sstevel@tonic-gate 	 * it is safe to do so as part of errorq_init(), below.
3137c478bd9Sstevel@tonic-gate 	 */
3147c478bd9Sstevel@tonic-gate 	if (modrootloaded && ddi_add_softintr(dip, DDI_SOFTINT_FIXED, &id,
3157c478bd9Sstevel@tonic-gate 	    &ibc, NULL, errorq_intr, (caddr_t)eqp) != DDI_SUCCESS) {
3167c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "errorq_create: failed to register "
3177c478bd9Sstevel@tonic-gate 		    "IPL %u softint for queue %s", ipl, name);
3187c478bd9Sstevel@tonic-gate 		kmem_free(eqp, sizeof (errorq_t));
3197c478bd9Sstevel@tonic-gate 		return (NULL);
3207c478bd9Sstevel@tonic-gate 	}
3217c478bd9Sstevel@tonic-gate 
322d624471bSelowe 	if ((eqp->eq_ksp = kstat_create("unix", 0, name, "errorq",
3237c478bd9Sstevel@tonic-gate 	    KSTAT_TYPE_NAMED, sizeof (struct errorq_kstat) /
3247c478bd9Sstevel@tonic-gate 	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) == NULL) {
3257c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "errorq_create: failed to create kstat "
3267c478bd9Sstevel@tonic-gate 		    "for queue %s", name);
3277c478bd9Sstevel@tonic-gate 		if (id != NULL)
3287c478bd9Sstevel@tonic-gate 			ddi_remove_softintr(id);
3297c478bd9Sstevel@tonic-gate 		kmem_free(eqp, sizeof (errorq_t));
3307c478bd9Sstevel@tonic-gate 		return (NULL);
3317c478bd9Sstevel@tonic-gate 	}
3327c478bd9Sstevel@tonic-gate 
3337c478bd9Sstevel@tonic-gate 	bcopy(&errorq_kstat_template, &eqp->eq_kstat,
3347c478bd9Sstevel@tonic-gate 	    sizeof (struct errorq_kstat));
3357c478bd9Sstevel@tonic-gate 	eqp->eq_ksp->ks_data = &eqp->eq_kstat;
3367c478bd9Sstevel@tonic-gate 	eqp->eq_ksp->ks_private = eqp;
3377c478bd9Sstevel@tonic-gate 	kstat_install(eqp->eq_ksp);
3387c478bd9Sstevel@tonic-gate 
3397c478bd9Sstevel@tonic-gate 	(void) strncpy(eqp->eq_name, name, ERRORQ_NAMELEN);
3407c478bd9Sstevel@tonic-gate 	eqp->eq_name[ERRORQ_NAMELEN] = '\0';
3417c478bd9Sstevel@tonic-gate 	eqp->eq_func = func;
3427c478bd9Sstevel@tonic-gate 	eqp->eq_private = private;
3437c478bd9Sstevel@tonic-gate 	eqp->eq_data = kmem_alloc(qlen * size, KM_SLEEP);
3447c478bd9Sstevel@tonic-gate 	eqp->eq_qlen = qlen;
3457c478bd9Sstevel@tonic-gate 	eqp->eq_size = size;
3467c478bd9Sstevel@tonic-gate 	eqp->eq_ipl = ipl;
3477c478bd9Sstevel@tonic-gate 	eqp->eq_flags = flags | ERRORQ_ACTIVE;
3487c478bd9Sstevel@tonic-gate 	eqp->eq_id = id;
3497c478bd9Sstevel@tonic-gate 	mutex_init(&eqp->eq_lock, NULL, MUTEX_DEFAULT, NULL);
3507c478bd9Sstevel@tonic-gate 	eqp->eq_elems = kmem_alloc(qlen * sizeof (errorq_elem_t), KM_SLEEP);
3517c478bd9Sstevel@tonic-gate 	eqp->eq_phead = NULL;
3527c478bd9Sstevel@tonic-gate 	eqp->eq_ptail = NULL;
3537c478bd9Sstevel@tonic-gate 	eqp->eq_pend = NULL;
3547c478bd9Sstevel@tonic-gate 	eqp->eq_dump = NULL;
355267b64d5SStephen Hanson 	eqp->eq_bitmap = kmem_zalloc(BT_SIZEOFMAP(qlen), KM_SLEEP);
356267b64d5SStephen Hanson 	eqp->eq_rotor = 0;
3577c478bd9Sstevel@tonic-gate 
3587c478bd9Sstevel@tonic-gate 	/*
359267b64d5SStephen Hanson 	 * Iterate over the array of errorq_elem_t structures and set its
360267b64d5SStephen Hanson 	 * data pointer.
3617c478bd9Sstevel@tonic-gate 	 */
362267b64d5SStephen Hanson 	for (eep = eqp->eq_elems, data = eqp->eq_data; qlen > 1; qlen--) {
3637c478bd9Sstevel@tonic-gate 		eep->eqe_next = NULL;
3647c478bd9Sstevel@tonic-gate 		eep->eqe_dump = NULL;
365267b64d5SStephen Hanson 		eep->eqe_prev = NULL;
3667c478bd9Sstevel@tonic-gate 		eep->eqe_data = data;
3677c478bd9Sstevel@tonic-gate 		data += size;
3687c478bd9Sstevel@tonic-gate 		eep++;
3697c478bd9Sstevel@tonic-gate 	}
3707c478bd9Sstevel@tonic-gate 	eep->eqe_next = NULL;
3717c478bd9Sstevel@tonic-gate 	eep->eqe_prev = NULL;
3727c478bd9Sstevel@tonic-gate 	eep->eqe_data = data;
3737c478bd9Sstevel@tonic-gate 	eep->eqe_dump = NULL;
3747c478bd9Sstevel@tonic-gate 
3757c478bd9Sstevel@tonic-gate 	/*
3767c478bd9Sstevel@tonic-gate 	 * Once the errorq is initialized, add it to the global list of queues,
3777c478bd9Sstevel@tonic-gate 	 * and then return a pointer to the new queue to the caller.
3787c478bd9Sstevel@tonic-gate 	 */
3797c478bd9Sstevel@tonic-gate 	mutex_enter(&errorq_lock);
3807c478bd9Sstevel@tonic-gate 	eqp->eq_next = errorq_list;
3817c478bd9Sstevel@tonic-gate 	errorq_list = eqp;
3827c478bd9Sstevel@tonic-gate 	mutex_exit(&errorq_lock);
3837c478bd9Sstevel@tonic-gate 
3847c478bd9Sstevel@tonic-gate 	return (eqp);
3857c478bd9Sstevel@tonic-gate }
3867c478bd9Sstevel@tonic-gate 
3877c478bd9Sstevel@tonic-gate /*
3887c478bd9Sstevel@tonic-gate  * Create a new errorq as if by errorq_create(), but set the ERRORQ_NVLIST
3897c478bd9Sstevel@tonic-gate  * flag and initialize each element to have the start of its data region used
3907c478bd9Sstevel@tonic-gate  * as an errorq_nvelem_t with a nvlist allocator that consumes the data region.
3917c478bd9Sstevel@tonic-gate  */
3927c478bd9Sstevel@tonic-gate errorq_t *
errorq_nvcreate(const char * name,errorq_func_t func,void * private,ulong_t qlen,size_t size,uint_t ipl,uint_t flags)3937c478bd9Sstevel@tonic-gate errorq_nvcreate(const char *name, errorq_func_t func, void *private,
3947c478bd9Sstevel@tonic-gate     ulong_t qlen, size_t size, uint_t ipl, uint_t flags)
3957c478bd9Sstevel@tonic-gate {
3967c478bd9Sstevel@tonic-gate 	errorq_t *eqp;
3977c478bd9Sstevel@tonic-gate 	errorq_elem_t *eep;
3987c478bd9Sstevel@tonic-gate 
3997c478bd9Sstevel@tonic-gate 	eqp = errorq_create(name, func, private, qlen,
4007c478bd9Sstevel@tonic-gate 	    size + sizeof (errorq_nvelem_t), ipl, flags | ERRORQ_NVLIST);
4017c478bd9Sstevel@tonic-gate 
4027c478bd9Sstevel@tonic-gate 	if (eqp == NULL)
4037c478bd9Sstevel@tonic-gate 		return (NULL);
4047c478bd9Sstevel@tonic-gate 
4057c478bd9Sstevel@tonic-gate 	mutex_enter(&eqp->eq_lock);
4067c478bd9Sstevel@tonic-gate 
4077c478bd9Sstevel@tonic-gate 	for (eep = eqp->eq_elems; qlen != 0; eep++, qlen--) {
4087c478bd9Sstevel@tonic-gate 		errorq_nvelem_t *eqnp = eep->eqe_data;
4097c478bd9Sstevel@tonic-gate 		eqnp->eqn_buf = (char *)eqnp + sizeof (errorq_nvelem_t);
4107c478bd9Sstevel@tonic-gate 		eqnp->eqn_nva = fm_nva_xcreate(eqnp->eqn_buf, size);
4117c478bd9Sstevel@tonic-gate 	}
4127c478bd9Sstevel@tonic-gate 
4137c478bd9Sstevel@tonic-gate 	mutex_exit(&eqp->eq_lock);
4147c478bd9Sstevel@tonic-gate 	return (eqp);
4157c478bd9Sstevel@tonic-gate }
4167c478bd9Sstevel@tonic-gate 
4177c478bd9Sstevel@tonic-gate /*
4187c478bd9Sstevel@tonic-gate  * To destroy an error queue, we mark it as disabled and then explicitly drain
4197c478bd9Sstevel@tonic-gate  * all pending errors.  Once the drain is complete, we can remove the queue
4207c478bd9Sstevel@tonic-gate  * from the global list of queues examined by errorq_panic(), and then free
4217c478bd9Sstevel@tonic-gate  * the various queue data structures.  The caller must use some higher-level
4227c478bd9Sstevel@tonic-gate  * abstraction (e.g. disabling an error interrupt) to ensure that no one will
4237c478bd9Sstevel@tonic-gate  * attempt to enqueue new errors while we are freeing this queue.
4247c478bd9Sstevel@tonic-gate  */
4257c478bd9Sstevel@tonic-gate void
errorq_destroy(errorq_t * eqp)4267c478bd9Sstevel@tonic-gate errorq_destroy(errorq_t *eqp)
4277c478bd9Sstevel@tonic-gate {
4287c478bd9Sstevel@tonic-gate 	errorq_t *p, **pp;
4297c478bd9Sstevel@tonic-gate 	errorq_elem_t *eep;
4307c478bd9Sstevel@tonic-gate 	ulong_t i;
4317c478bd9Sstevel@tonic-gate 
	/* Mark the queue inactive and flush any pending errors before teardown */
4327c478bd9Sstevel@tonic-gate 	ASSERT(eqp != NULL);
4337c478bd9Sstevel@tonic-gate 	eqp->eq_flags &= ~ERRORQ_ACTIVE;
4347c478bd9Sstevel@tonic-gate 	errorq_drain(eqp);
4357c478bd9Sstevel@tonic-gate 
	/* Unlink eqp from the global errorq_list under errorq_lock */
4367c478bd9Sstevel@tonic-gate 	mutex_enter(&errorq_lock);
4377c478bd9Sstevel@tonic-gate 	pp = &errorq_list;
4387c478bd9Sstevel@tonic-gate 
4397c478bd9Sstevel@tonic-gate 	for (p = errorq_list; p != NULL; p = p->eq_next) {
4407c478bd9Sstevel@tonic-gate 		if (p == eqp) {
4417c478bd9Sstevel@tonic-gate 			*pp = p->eq_next;
4427c478bd9Sstevel@tonic-gate 			break;
4437c478bd9Sstevel@tonic-gate 		}
4447c478bd9Sstevel@tonic-gate 		pp = &p->eq_next;
4457c478bd9Sstevel@tonic-gate 	}
4467c478bd9Sstevel@tonic-gate 
4477c478bd9Sstevel@tonic-gate 	mutex_exit(&errorq_lock);
	/* p == NULL would mean eqp was never on errorq_list */
4487c478bd9Sstevel@tonic-gate 	ASSERT(p != NULL);
4497c478bd9Sstevel@tonic-gate 
	/* For nvlist queues, tear down each element's nv_alloc state */
4507c478bd9Sstevel@tonic-gate 	if (eqp->eq_flags & ERRORQ_NVLIST) {
4517c478bd9Sstevel@tonic-gate 		for (eep = eqp->eq_elems, i = 0; i < eqp->eq_qlen; i++, eep++) {
4527c478bd9Sstevel@tonic-gate 			errorq_nvelem_t *eqnp = eep->eqe_data;
4537c478bd9Sstevel@tonic-gate 			fm_nva_xdestroy(eqnp->eqn_nva);
4547c478bd9Sstevel@tonic-gate 		}
4557c478bd9Sstevel@tonic-gate 	}
4567c478bd9Sstevel@tonic-gate 
4577c478bd9Sstevel@tonic-gate 	mutex_destroy(&eqp->eq_lock);
4587c478bd9Sstevel@tonic-gate 	kstat_delete(eqp->eq_ksp);
4597c478bd9Sstevel@tonic-gate 
4607c478bd9Sstevel@tonic-gate 	if (eqp->eq_id != NULL)
4617c478bd9Sstevel@tonic-gate 		ddi_remove_softintr(eqp->eq_id);
4627c478bd9Sstevel@tonic-gate 
	/* Free the element array, the allocation bitmap, the data area, and eqp */
4637c478bd9Sstevel@tonic-gate 	kmem_free(eqp->eq_elems, eqp->eq_qlen * sizeof (errorq_elem_t));
464267b64d5SStephen Hanson 	kmem_free(eqp->eq_bitmap, BT_SIZEOFMAP(eqp->eq_qlen));
4657c478bd9Sstevel@tonic-gate 	kmem_free(eqp->eq_data, eqp->eq_qlen * eqp->eq_size);
4667c478bd9Sstevel@tonic-gate 
4677c478bd9Sstevel@tonic-gate 	kmem_free(eqp, sizeof (errorq_t));
4687c478bd9Sstevel@tonic-gate }
4697c478bd9Sstevel@tonic-gate 
4707c478bd9Sstevel@tonic-gate /*
471267b64d5SStephen Hanson  * private version of bt_availbit which makes a best-efforts attempt
472267b64d5SStephen Hanson  * at allocating in a round-robin fashion in order to facilitate post-mortem
473267b64d5SStephen Hanson  * diagnosis.
474267b64d5SStephen Hanson  */
475267b64d5SStephen Hanson static index_t
errorq_availbit(ulong_t * bitmap,size_t nbits,index_t curindex)476267b64d5SStephen Hanson errorq_availbit(ulong_t *bitmap, size_t nbits, index_t curindex)
477267b64d5SStephen Hanson {
	/*
	 * Returns the index of a clear bit at or after curindex + 1, wrapping
	 * back to the start of the bitmap, or -1 if no clear bit is found.
	 * The scan itself is not atomic; callers confirm ownership of the
	 * returned index with BT_ATOMIC_SET_EXCL and retry on failure.
	 */
478267b64d5SStephen Hanson 	ulong_t bit, maxbit, bx;
479267b64d5SStephen Hanson 	index_t rval, nextindex = curindex + 1;
480267b64d5SStephen Hanson 	index_t nextword = nextindex >> BT_ULSHIFT;
481267b64d5SStephen Hanson 	ulong_t nextbitindex = nextindex & BT_ULMASK;
482267b64d5SStephen Hanson 	index_t maxindex = nbits - 1;
483267b64d5SStephen Hanson 	index_t maxword = maxindex >> BT_ULSHIFT;
484267b64d5SStephen Hanson 	ulong_t maxbitindex = maxindex & BT_ULMASK;
485267b64d5SStephen Hanson 
486267b64d5SStephen Hanson 	/*
487267b64d5SStephen Hanson 	 * First check if there are still some bits remaining in the current
488267b64d5SStephen Hanson 	 * word, and see if any of those are available. We need to do this by
489267b64d5SStephen Hanson 	 * hand as the bt_availbit() function always starts at the beginning
490267b64d5SStephen Hanson 	 * of a word.
491267b64d5SStephen Hanson 	 */
492267b64d5SStephen Hanson 	if (nextindex <= maxindex && nextbitindex != 0) {
493267b64d5SStephen Hanson 		maxbit = (nextword == maxword) ? maxbitindex : BT_ULMASK;
494267b64d5SStephen Hanson 		for (bx = 0, bit = 1; bx <= maxbit; bx++, bit <<= 1)
495267b64d5SStephen Hanson 			if (bx >= nextbitindex && !(bitmap[nextword] & bit))
496267b64d5SStephen Hanson 				return ((nextword << BT_ULSHIFT) + bx);
497267b64d5SStephen Hanson 		nextword++;
498267b64d5SStephen Hanson 	}
499267b64d5SStephen Hanson 	/*
500267b64d5SStephen Hanson 	 * Now check if there are any words remaining before the end of the
501267b64d5SStephen Hanson 	 * bitmap. Use bt_availbit() to find any free bits.
502267b64d5SStephen Hanson 	 */
503267b64d5SStephen Hanson 	if (nextword <= maxword)
504267b64d5SStephen Hanson 		if ((rval = bt_availbit(&bitmap[nextword],
505267b64d5SStephen Hanson 		    nbits - (nextword << BT_ULSHIFT))) != -1)
506267b64d5SStephen Hanson 			return ((nextword << BT_ULSHIFT) + rval);
507267b64d5SStephen Hanson 	/*
508267b64d5SStephen Hanson 	 * Finally loop back to the start and look for any free bits starting
509267b64d5SStephen Hanson 	 * from the beginning of the bitmap to the current rotor position.
510267b64d5SStephen Hanson 	 */
511267b64d5SStephen Hanson 	return (bt_availbit(bitmap, nextindex));
512267b64d5SStephen Hanson }
513267b64d5SStephen Hanson 
514267b64d5SStephen Hanson /*
5157c478bd9Sstevel@tonic-gate  * Dispatch a new error into the queue for later processing.  The specified
5167c478bd9Sstevel@tonic-gate  * data buffer is copied into a preallocated queue element.  If 'len' is
5177c478bd9Sstevel@tonic-gate  * smaller than the queue element size, the remainder of the queue element is
5187c478bd9Sstevel@tonic-gate  * filled with zeroes.  This function may be called from any context subject
5197c478bd9Sstevel@tonic-gate  * to the Platform Considerations described above.
5207c478bd9Sstevel@tonic-gate  */
5217c478bd9Sstevel@tonic-gate void
errorq_dispatch(errorq_t * eqp,const void * data,size_t len,uint_t flag)5227c478bd9Sstevel@tonic-gate errorq_dispatch(errorq_t *eqp, const void *data, size_t len, uint_t flag)
5237c478bd9Sstevel@tonic-gate {
5247c478bd9Sstevel@tonic-gate 	errorq_elem_t *eep, *old;
5257c478bd9Sstevel@tonic-gate 
5267c478bd9Sstevel@tonic-gate 	if (eqp == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
5271a5e258fSJosef 'Jeff' Sipek 		atomic_inc_64(&errorq_lost);
5287c478bd9Sstevel@tonic-gate 		return; /* drop error if queue is uninitialized or disabled */
5297c478bd9Sstevel@tonic-gate 	}
5307c478bd9Sstevel@tonic-gate 
	/*
	 * Claim a free element: find a candidate clear bit after the rotor,
	 * then race for it with an exclusive atomic set.  rval != 0 means
	 * another producer claimed the same bit first, so retry.  The queue
	 * counts a drop only when no clear bit exists at all.
	 */
531267b64d5SStephen Hanson 	for (;;) {
532267b64d5SStephen Hanson 		int i, rval;
5337c478bd9Sstevel@tonic-gate 
534267b64d5SStephen Hanson 		if ((i = errorq_availbit(eqp->eq_bitmap, eqp->eq_qlen,
535267b64d5SStephen Hanson 		    eqp->eq_rotor)) == -1) {
5361a5e258fSJosef 'Jeff' Sipek 			atomic_inc_64(&eqp->eq_kstat.eqk_dropped.value.ui64);
5377c478bd9Sstevel@tonic-gate 			return;
5387c478bd9Sstevel@tonic-gate 		}
539267b64d5SStephen Hanson 		BT_ATOMIC_SET_EXCL(eqp->eq_bitmap, i, rval);
540267b64d5SStephen Hanson 		if (rval == 0) {
541267b64d5SStephen Hanson 			eqp->eq_rotor = i;
542267b64d5SStephen Hanson 			eep = &eqp->eq_elems[i];
543267b64d5SStephen Hanson 			break;
544267b64d5SStephen Hanson 		}
545267b64d5SStephen Hanson 	}
5467c478bd9Sstevel@tonic-gate 
	/* Copy the caller's data in, zero-filling any remainder of the element */
5477c478bd9Sstevel@tonic-gate 	ASSERT(len <= eqp->eq_size);
5487c478bd9Sstevel@tonic-gate 	bcopy(data, eep->eqe_data, MIN(eqp->eq_size, len));
5497c478bd9Sstevel@tonic-gate 
5507c478bd9Sstevel@tonic-gate 	if (len < eqp->eq_size)
5517c478bd9Sstevel@tonic-gate 		bzero((caddr_t)eep->eqe_data + len, eqp->eq_size - len);
5527c478bd9Sstevel@tonic-gate 
	/*
	 * Lock-free push onto the pending list (newest at eq_pend, linked
	 * via eqe_prev).  membar_producer() ensures eqe_prev is visible
	 * before the element itself becomes reachable through eq_pend.
	 */
5537c478bd9Sstevel@tonic-gate 	for (;;) {
5547c478bd9Sstevel@tonic-gate 		old = eqp->eq_pend;
5557c478bd9Sstevel@tonic-gate 		eep->eqe_prev = old;
5567c478bd9Sstevel@tonic-gate 		membar_producer();
5577c478bd9Sstevel@tonic-gate 
55875d94465SJosef 'Jeff' Sipek 		if (atomic_cas_ptr(&eqp->eq_pend, old, eep) == old)
5597c478bd9Sstevel@tonic-gate 			break;
5607c478bd9Sstevel@tonic-gate 	}
5617c478bd9Sstevel@tonic-gate 
5621a5e258fSJosef 'Jeff' Sipek 	atomic_inc_64(&eqp->eq_kstat.eqk_dispatched.value.ui64);
5637c478bd9Sstevel@tonic-gate 
	/* For ASYNC dispatch, kick the drain soft interrupt if it is set up */
5647c478bd9Sstevel@tonic-gate 	if (flag == ERRORQ_ASYNC && eqp->eq_id != NULL)
5657c478bd9Sstevel@tonic-gate 		ddi_trigger_softintr(eqp->eq_id);
5667c478bd9Sstevel@tonic-gate }
5677c478bd9Sstevel@tonic-gate 
5687c478bd9Sstevel@tonic-gate /*
5697c478bd9Sstevel@tonic-gate  * Drain the specified error queue by calling eq_func() for each pending error.
5707c478bd9Sstevel@tonic-gate  * This function must be called at or below LOCK_LEVEL or from panic context.
5717c478bd9Sstevel@tonic-gate  * In order to synchronize with other attempts to drain the queue, we acquire
5727c478bd9Sstevel@tonic-gate  * the adaptive eq_lock, blocking other consumers.  Once this lock is held,
5737c478bd9Sstevel@tonic-gate  * we must use compare-and-swap to move the pending list to the processing
574267b64d5SStephen Hanson  * list and to return elements to the free pool in order to synchronize
575267b64d5SStephen Hanson  * with producers, who do not acquire any locks and only use atomic set/clear.
5767c478bd9Sstevel@tonic-gate  *
5777c478bd9Sstevel@tonic-gate  * An additional constraint on this function is that if the system panics
5787c478bd9Sstevel@tonic-gate  * while this function is running, the panic code must be able to detect and
5797c478bd9Sstevel@tonic-gate  * handle all intermediate states and correctly dequeue all errors.  The
5807c478bd9Sstevel@tonic-gate  * errorq_panic() function below will be used for detecting and handling
5817c478bd9Sstevel@tonic-gate  * these intermediate states.  The comments in errorq_drain() below explain
5827c478bd9Sstevel@tonic-gate  * how we make sure each intermediate state is distinct and consistent.
5837c478bd9Sstevel@tonic-gate  */
5847c478bd9Sstevel@tonic-gate void
errorq_drain(errorq_t * eqp)5857c478bd9Sstevel@tonic-gate errorq_drain(errorq_t *eqp)
5867c478bd9Sstevel@tonic-gate {
587267b64d5SStephen Hanson 	errorq_elem_t *eep, *dep;
5887c478bd9Sstevel@tonic-gate 
5897c478bd9Sstevel@tonic-gate 	ASSERT(eqp != NULL);
5907c478bd9Sstevel@tonic-gate 	mutex_enter(&eqp->eq_lock);
5917c478bd9Sstevel@tonic-gate 
5927c478bd9Sstevel@tonic-gate 	/*
5937c478bd9Sstevel@tonic-gate 	 * If there are one or more pending errors, set eq_ptail to point to
5947c478bd9Sstevel@tonic-gate 	 * the first element on the pending list and then attempt to compare-
5957c478bd9Sstevel@tonic-gate 	 * and-swap NULL to the pending list.  We use membar_producer() to
5967c478bd9Sstevel@tonic-gate 	 * make sure that eq_ptail will be visible to errorq_panic() below
5977c478bd9Sstevel@tonic-gate 	 * before the pending list is NULLed out.  This section is labeled
5987c478bd9Sstevel@tonic-gate 	 * case (1) for errorq_panic, below.  If eq_ptail is not yet set (1A)
5997c478bd9Sstevel@tonic-gate 	 * eq_pend has all the pending errors.  If atomic_cas_ptr fails or
60075d94465SJosef 'Jeff' Sipek 	 * has not been called yet (1B), eq_pend still has all the pending
60175d94465SJosef 'Jeff' Sipek 	 * errors.  If atomic_cas_ptr succeeds (1C), eq_ptail has all the
60275d94465SJosef 'Jeff' Sipek 	 * pending errors.
60375d94465SJosef 'Jeff' Sipek 	 */
6047c478bd9Sstevel@tonic-gate 	while ((eep = eqp->eq_pend) != NULL) {
6057c478bd9Sstevel@tonic-gate 		eqp->eq_ptail = eep;
6067c478bd9Sstevel@tonic-gate 		membar_producer();
6077c478bd9Sstevel@tonic-gate 
60875d94465SJosef 'Jeff' Sipek 		if (atomic_cas_ptr(&eqp->eq_pend, eep, NULL) == eep)
6097c478bd9Sstevel@tonic-gate 			break;
6107c478bd9Sstevel@tonic-gate 	}
6117c478bd9Sstevel@tonic-gate 
6127c478bd9Sstevel@tonic-gate 	/*
6137c478bd9Sstevel@tonic-gate 	 * If no errors were pending, assert that eq_ptail is set to NULL,
6147c478bd9Sstevel@tonic-gate 	 * drop the consumer lock, and return without doing anything.
6157c478bd9Sstevel@tonic-gate 	 */
6167c478bd9Sstevel@tonic-gate 	if (eep == NULL) {
6177c478bd9Sstevel@tonic-gate 		ASSERT(eqp->eq_ptail == NULL);
6187c478bd9Sstevel@tonic-gate 		mutex_exit(&eqp->eq_lock);
6197c478bd9Sstevel@tonic-gate 		return;
6207c478bd9Sstevel@tonic-gate 	}
6217c478bd9Sstevel@tonic-gate 
6227c478bd9Sstevel@tonic-gate 	/*
6237c478bd9Sstevel@tonic-gate 	 * Now iterate from eq_ptail (a.k.a. eep, the newest error) to the
6247c478bd9Sstevel@tonic-gate 	 * oldest error, setting the eqe_next pointer so that we can iterate
6257c478bd9Sstevel@tonic-gate 	 * over the errors from oldest to newest.  We use membar_producer()
6267c478bd9Sstevel@tonic-gate 	 * to make sure that these stores are visible before we set eq_phead.
6277c478bd9Sstevel@tonic-gate 	 * If we panic before, during, or just after this loop (case 2),
6287c478bd9Sstevel@tonic-gate 	 * errorq_panic() will simply redo this work, as described below.
6297c478bd9Sstevel@tonic-gate 	 */
6307c478bd9Sstevel@tonic-gate 	for (eep->eqe_next = NULL; eep->eqe_prev != NULL; eep = eep->eqe_prev)
6317c478bd9Sstevel@tonic-gate 		eep->eqe_prev->eqe_next = eep;
6327c478bd9Sstevel@tonic-gate 	membar_producer();
6337c478bd9Sstevel@tonic-gate 
6347c478bd9Sstevel@tonic-gate 	/*
6357c478bd9Sstevel@tonic-gate 	 * Now set eq_phead to the head of the processing list (the oldest
6367c478bd9Sstevel@tonic-gate 	 * error) and issue another membar_producer() to make sure that
6377c478bd9Sstevel@tonic-gate 	 * eq_phead is seen as non-NULL before we clear eq_ptail.  If we panic
6387c478bd9Sstevel@tonic-gate 	 * after eq_phead is set (case 3), we will detect and log these errors
6397c478bd9Sstevel@tonic-gate 	 * in errorq_panic(), as described below.
6407c478bd9Sstevel@tonic-gate 	 */
6417c478bd9Sstevel@tonic-gate 	eqp->eq_phead = eep;
6427c478bd9Sstevel@tonic-gate 	membar_producer();
6437c478bd9Sstevel@tonic-gate 
6447c478bd9Sstevel@tonic-gate 	eqp->eq_ptail = NULL;
6457c478bd9Sstevel@tonic-gate 	membar_producer();
6467c478bd9Sstevel@tonic-gate 
6477c478bd9Sstevel@tonic-gate 	/*
6487c478bd9Sstevel@tonic-gate 	 * If we enter from errorq_panic_drain(), we may already have
6497c478bd9Sstevel@tonic-gate 	 * errorq elements on the dump list.  Find the tail of
6507c478bd9Sstevel@tonic-gate 	 * the list ready for append.
6517c478bd9Sstevel@tonic-gate 	 */
652c6f039c7SToomas Soome 	dep = eqp->eq_dump;
653c6f039c7SToomas Soome 	if (panicstr && dep != NULL) {
6547c478bd9Sstevel@tonic-gate 		while (dep->eqe_dump != NULL)
6557c478bd9Sstevel@tonic-gate 			dep = dep->eqe_dump;
6567c478bd9Sstevel@tonic-gate 	}
6577c478bd9Sstevel@tonic-gate 
6587c478bd9Sstevel@tonic-gate 	/*
6597c478bd9Sstevel@tonic-gate 	 * Now iterate over the processing list from oldest (eq_phead) to
6607c478bd9Sstevel@tonic-gate 	 * newest and log each error.  Once an error is logged, we use
661267b64d5SStephen Hanson 	 * atomic clear to return it to the free pool.  If we panic before,
6627c478bd9Sstevel@tonic-gate 	 * during, or after calling eq_func() (case 4), the error will still be
6637c478bd9Sstevel@tonic-gate 	 * found on eq_phead and will be logged in errorq_panic below.
6647c478bd9Sstevel@tonic-gate 	 */
6657c478bd9Sstevel@tonic-gate 
6667c478bd9Sstevel@tonic-gate 	while ((eep = eqp->eq_phead) != NULL) {
6677c478bd9Sstevel@tonic-gate 		eqp->eq_func(eqp->eq_private, eep->eqe_data, eep);
		/* Plain increment: eq_lock serializes all consumers */
6687c478bd9Sstevel@tonic-gate 		eqp->eq_kstat.eqk_logged.value.ui64++;
6697c478bd9Sstevel@tonic-gate 
6707c478bd9Sstevel@tonic-gate 		eqp->eq_phead = eep->eqe_next;
6717c478bd9Sstevel@tonic-gate 		membar_producer();
6727c478bd9Sstevel@tonic-gate 
6737c478bd9Sstevel@tonic-gate 		eep->eqe_next = NULL;
6747c478bd9Sstevel@tonic-gate 
6757c478bd9Sstevel@tonic-gate 		/*
6767c478bd9Sstevel@tonic-gate 		 * On panic, we add the element to the dump list for each
6777c478bd9Sstevel@tonic-gate 		 * nvlist errorq.  Elements are stored oldest to newest.
6781d76b125Sstephh 		 * Then continue, so we don't free and subsequently overwrite
6791d76b125Sstephh 		 * any elements which we've put on the dump queue.
6807c478bd9Sstevel@tonic-gate 		 */
6817c478bd9Sstevel@tonic-gate 		if (panicstr && (eqp->eq_flags & ERRORQ_NVLIST)) {
6827c478bd9Sstevel@tonic-gate 			if (eqp->eq_dump == NULL)
6837c478bd9Sstevel@tonic-gate 				dep = eqp->eq_dump = eep;
6847c478bd9Sstevel@tonic-gate 			else
6857c478bd9Sstevel@tonic-gate 				dep = dep->eqe_dump = eep;
6867c478bd9Sstevel@tonic-gate 			membar_producer();
6871d76b125Sstephh 			continue;
6881d76b125Sstephh 		}
6891d76b125Sstephh 
		/* Clearing the bitmap bit returns the element to the free pool */
690267b64d5SStephen Hanson 		eep->eqe_prev = NULL;
691267b64d5SStephen Hanson 		BT_ATOMIC_CLEAR(eqp->eq_bitmap, eep - eqp->eq_elems);
6927c478bd9Sstevel@tonic-gate 	}
6937c478bd9Sstevel@tonic-gate 
6947c478bd9Sstevel@tonic-gate 	mutex_exit(&eqp->eq_lock);
6957c478bd9Sstevel@tonic-gate }
6967c478bd9Sstevel@tonic-gate 
6977c478bd9Sstevel@tonic-gate /*
6987c478bd9Sstevel@tonic-gate  * Now that device tree services are available, set up the soft interrupt
6997c478bd9Sstevel@tonic-gate  * handlers for any queues that were created early in boot.  We then
7007c478bd9Sstevel@tonic-gate  * manually drain these queues to report any pending early errors.
7017c478bd9Sstevel@tonic-gate  */
7027c478bd9Sstevel@tonic-gate void
errorq_init(void)7037c478bd9Sstevel@tonic-gate errorq_init(void)
7047c478bd9Sstevel@tonic-gate {
7057c478bd9Sstevel@tonic-gate 	dev_info_t *dip = ddi_root_node();
7067c478bd9Sstevel@tonic-gate 	ddi_softintr_t id;
7077c478bd9Sstevel@tonic-gate 	errorq_t *eqp;
7087c478bd9Sstevel@tonic-gate 
7097c478bd9Sstevel@tonic-gate 	ASSERT(modrootloaded != 0);
7107c478bd9Sstevel@tonic-gate 	ASSERT(dip != NULL);
7117c478bd9Sstevel@tonic-gate 
7127c478bd9Sstevel@tonic-gate 	mutex_enter(&errorq_lock);
7137c478bd9Sstevel@tonic-gate 
7147c478bd9Sstevel@tonic-gate 	for (eqp = errorq_list; eqp != NULL; eqp = eqp->eq_next) {
		/* Derive the iblock cookie from the queue's interrupt level */
7157c478bd9Sstevel@tonic-gate 		ddi_iblock_cookie_t ibc =
7167c478bd9Sstevel@tonic-gate 		    (ddi_iblock_cookie_t)(uintptr_t)ipltospl(eqp->eq_ipl);
7177c478bd9Sstevel@tonic-gate 
7187c478bd9Sstevel@tonic-gate 		if (eqp->eq_id != NULL)
7197c478bd9Sstevel@tonic-gate 			continue; /* softint already initialized */
7207c478bd9Sstevel@tonic-gate 
7217c478bd9Sstevel@tonic-gate 		if (ddi_add_softintr(dip, DDI_SOFTINT_FIXED, &id, &ibc, NULL,
7227c478bd9Sstevel@tonic-gate 		    errorq_intr, (caddr_t)eqp) != DDI_SUCCESS) {
7237c478bd9Sstevel@tonic-gate 			panic("errorq_init: failed to register IPL %u softint "
7247c478bd9Sstevel@tonic-gate 			    "for queue %s", eqp->eq_ipl, eqp->eq_name);
7257c478bd9Sstevel@tonic-gate 		}
7267c478bd9Sstevel@tonic-gate 
		/* Publish the softint id, then flush any errors queued early in boot */
7277c478bd9Sstevel@tonic-gate 		eqp->eq_id = id;
7287c478bd9Sstevel@tonic-gate 		errorq_drain(eqp);
7297c478bd9Sstevel@tonic-gate 	}
7307c478bd9Sstevel@tonic-gate 
7317c478bd9Sstevel@tonic-gate 	mutex_exit(&errorq_lock);
7327c478bd9Sstevel@tonic-gate }
7337c478bd9Sstevel@tonic-gate 
7347c478bd9Sstevel@tonic-gate /*
7357c478bd9Sstevel@tonic-gate  * This function is designed to be called from panic context only, and
7367c478bd9Sstevel@tonic-gate  * therefore does not need to acquire errorq_lock when iterating over
7377c478bd9Sstevel@tonic-gate  * errorq_list.  This function must be called no more than once for each
7387c478bd9Sstevel@tonic-gate  * 'what' value (if you change this then review the manipulation of 'dep').
7397c478bd9Sstevel@tonic-gate  */
7407c478bd9Sstevel@tonic-gate static uint64_t
errorq_panic_drain(uint_t what)7417c478bd9Sstevel@tonic-gate errorq_panic_drain(uint_t what)
7427c478bd9Sstevel@tonic-gate {
743267b64d5SStephen Hanson 	errorq_elem_t *eep, *nep, *dep;
7447c478bd9Sstevel@tonic-gate 	errorq_t *eqp;
7457c478bd9Sstevel@tonic-gate 	uint64_t loggedtmp;
7467c478bd9Sstevel@tonic-gate 	uint64_t logged = 0;
7477c478bd9Sstevel@tonic-gate 
	/*
	 * dep carries the tail of the eqe_dump list across loop iterations;
	 * NOTE(review): it is only reset when eq_dump is NULL, which relies
	 * on the once-per-'what' calling convention described above.
	 */
748c6f039c7SToomas Soome 	dep = NULL;
7497c478bd9Sstevel@tonic-gate 	for (eqp = errorq_list; eqp != NULL; eqp = eqp->eq_next) {
7507c478bd9Sstevel@tonic-gate 		if ((eqp->eq_flags & (ERRORQ_VITAL | ERRORQ_NVLIST)) != what)
7517c478bd9Sstevel@tonic-gate 			continue; /* do not drain this queue on this pass */
7527c478bd9Sstevel@tonic-gate 
		/* Snapshot the logged count so we can report this pass's delta */
7537c478bd9Sstevel@tonic-gate 		loggedtmp = eqp->eq_kstat.eqk_logged.value.ui64;
7547c478bd9Sstevel@tonic-gate 
7557c478bd9Sstevel@tonic-gate 		/*
75675d94465SJosef 'Jeff' Sipek 		 * In case (1B) above, eq_ptail may be set but the
75775d94465SJosef 'Jeff' Sipek 		 * atomic_cas_ptr may not have been executed yet or may have
75875d94465SJosef 'Jeff' Sipek 		 * failed.  Either way, we must log errors in chronological
75975d94465SJosef 'Jeff' Sipek 		 * order.  So we search the pending list for the error
76075d94465SJosef 'Jeff' Sipek 		 * pointed to by eq_ptail.  If it is found, we know that all
76175d94465SJosef 'Jeff' Sipek 		 * subsequent errors are also still on the pending list, so
76275d94465SJosef 'Jeff' Sipek 		 * just NULL out eq_ptail and let errorq_drain(), below,
76375d94465SJosef 'Jeff' Sipek 		 * take care of the logging.
7647c478bd9Sstevel@tonic-gate 		 */
7657c478bd9Sstevel@tonic-gate 		for (eep = eqp->eq_pend; eep != NULL; eep = eep->eqe_prev) {
7667c478bd9Sstevel@tonic-gate 			if (eep == eqp->eq_ptail) {
7677c478bd9Sstevel@tonic-gate 				ASSERT(eqp->eq_phead == NULL);
7687c478bd9Sstevel@tonic-gate 				eqp->eq_ptail = NULL;
7697c478bd9Sstevel@tonic-gate 				break;
7707c478bd9Sstevel@tonic-gate 			}
7717c478bd9Sstevel@tonic-gate 		}
7727c478bd9Sstevel@tonic-gate 
7737c478bd9Sstevel@tonic-gate 		/*
7747c478bd9Sstevel@tonic-gate 		 * In cases (1C) and (2) above, eq_ptail will be set to the
7757c478bd9Sstevel@tonic-gate 		 * newest error on the processing list but eq_phead will still
7767c478bd9Sstevel@tonic-gate 		 * be NULL.  We set the eqe_next pointers so we can iterate
7777c478bd9Sstevel@tonic-gate 		 * over the processing list in order from oldest error to the
7787c478bd9Sstevel@tonic-gate 		 * newest error.  We then set eq_phead to point to the oldest
7797c478bd9Sstevel@tonic-gate 		 * error and fall into the for-loop below.
7807c478bd9Sstevel@tonic-gate 		 */
7817c478bd9Sstevel@tonic-gate 		if (eqp->eq_phead == NULL && (eep = eqp->eq_ptail) != NULL) {
7827c478bd9Sstevel@tonic-gate 			for (eep->eqe_next = NULL; eep->eqe_prev != NULL;
7837c478bd9Sstevel@tonic-gate 			    eep = eep->eqe_prev)
7847c478bd9Sstevel@tonic-gate 				eep->eqe_prev->eqe_next = eep;
7857c478bd9Sstevel@tonic-gate 
7867c478bd9Sstevel@tonic-gate 			eqp->eq_phead = eep;
7877c478bd9Sstevel@tonic-gate 			eqp->eq_ptail = NULL;
7887c478bd9Sstevel@tonic-gate 		}
7897c478bd9Sstevel@tonic-gate 
7907c478bd9Sstevel@tonic-gate 		/*
7917c478bd9Sstevel@tonic-gate 		 * In cases (3) and (4) above (or after case (1C/2) handling),
7927c478bd9Sstevel@tonic-gate 		 * eq_phead will be set to the oldest error on the processing
7937c478bd9Sstevel@tonic-gate 		 * list.  We log each error and return it to the free pool.
7947c478bd9Sstevel@tonic-gate 		 *
7957c478bd9Sstevel@tonic-gate 		 * Unlike errorq_drain(), we don't need to worry about updating
7967c478bd9Sstevel@tonic-gate 		 * eq_phead because errorq_panic() will be called at most once.
79775d94465SJosef 'Jeff' Sipek 		 * However, we must use atomic_cas_ptr to update the
79875d94465SJosef 'Jeff' Sipek 		 * freelist in case errors are still being enqueued during
79975d94465SJosef 'Jeff' Sipek 		 * panic.
8007c478bd9Sstevel@tonic-gate 		 */
8017c478bd9Sstevel@tonic-gate 		for (eep = eqp->eq_phead; eep != NULL; eep = nep) {
8027c478bd9Sstevel@tonic-gate 			eqp->eq_func(eqp->eq_private, eep->eqe_data, eep);
8037c478bd9Sstevel@tonic-gate 			eqp->eq_kstat.eqk_logged.value.ui64++;
8047c478bd9Sstevel@tonic-gate 
8057c478bd9Sstevel@tonic-gate 			nep = eep->eqe_next;
8067c478bd9Sstevel@tonic-gate 			eep->eqe_next = NULL;
8077c478bd9Sstevel@tonic-gate 
8087c478bd9Sstevel@tonic-gate 			/*
8097c478bd9Sstevel@tonic-gate 			 * On panic, we add the element to the dump list for
8101d76b125Sstephh 			 * each nvlist errorq, stored oldest to newest. Then
8111d76b125Sstephh 			 * continue, so we don't free and subsequently overwrite
8121d76b125Sstephh 			 * any elements which we've put on the dump queue.
8137c478bd9Sstevel@tonic-gate 			 */
8147c478bd9Sstevel@tonic-gate 			if (eqp->eq_flags & ERRORQ_NVLIST) {
8157c478bd9Sstevel@tonic-gate 				if (eqp->eq_dump == NULL)
8167c478bd9Sstevel@tonic-gate 					dep = eqp->eq_dump = eep;
8177c478bd9Sstevel@tonic-gate 				else
8187c478bd9Sstevel@tonic-gate 					dep = dep->eqe_dump = eep;
8197c478bd9Sstevel@tonic-gate 				membar_producer();
8201d76b125Sstephh 				continue;
8211d76b125Sstephh 			}
8221d76b125Sstephh 
			/* Clearing the bitmap bit returns the element to the free pool */
823267b64d5SStephen Hanson 			eep->eqe_prev = NULL;
824267b64d5SStephen Hanson 			BT_ATOMIC_CLEAR(eqp->eq_bitmap, eep - eqp->eq_elems);
8257c478bd9Sstevel@tonic-gate 		}
8267c478bd9Sstevel@tonic-gate 
8277c478bd9Sstevel@tonic-gate 		/*
8287c478bd9Sstevel@tonic-gate 		 * Now go ahead and drain any other errors on the pending list.
8297c478bd9Sstevel@tonic-gate 		 * This call transparently handles case (1A) above, as well as
8307c478bd9Sstevel@tonic-gate 		 * any other errors that were dispatched after errorq_drain()
8317c478bd9Sstevel@tonic-gate 		 * completed its first compare-and-swap.
8327c478bd9Sstevel@tonic-gate 		 */
8337c478bd9Sstevel@tonic-gate 		errorq_drain(eqp);
8347c478bd9Sstevel@tonic-gate 
8357c478bd9Sstevel@tonic-gate 		logged += eqp->eq_kstat.eqk_logged.value.ui64 - loggedtmp;
8367c478bd9Sstevel@tonic-gate 	}
8377c478bd9Sstevel@tonic-gate 	return (logged);
8387c478bd9Sstevel@tonic-gate }
8397c478bd9Sstevel@tonic-gate 
8407c478bd9Sstevel@tonic-gate /*
8417c478bd9Sstevel@tonic-gate  * Drain all error queues - called only from panic context.  Some drain
8427c478bd9Sstevel@tonic-gate  * functions may enqueue errors to ERRORQ_NVLIST error queues so that
8437c478bd9Sstevel@tonic-gate  * they may be written out in the panic dump - so ERRORQ_NVLIST queues
8447c478bd9Sstevel@tonic-gate  * must be drained last.  Drain ERRORQ_VITAL queues before nonvital queues
8457c478bd9Sstevel@tonic-gate  * so that vital errors get to fill the ERRORQ_NVLIST queues first, and
8467c478bd9Sstevel@tonic-gate  * do not drain the nonvital queues if there are many vital errors.
8477c478bd9Sstevel@tonic-gate  */
8487c478bd9Sstevel@tonic-gate void
errorq_panic(void)8497c478bd9Sstevel@tonic-gate errorq_panic(void)
8507c478bd9Sstevel@tonic-gate {
8517c478bd9Sstevel@tonic-gate 	ASSERT(panicstr != NULL);
8527c478bd9Sstevel@tonic-gate 
	/*
	 * Pass 1: vital non-nvlist queues.  Pass 2: nonvital non-nvlist
	 * queues, skipped when the vital pass already logged more than
	 * errorq_vitalmin errors.  Passes 3 and 4: nvlist queues last, so
	 * that earlier drain functions may still enqueue to them.
	 */
8537c478bd9Sstevel@tonic-gate 	if (errorq_panic_drain(ERRORQ_VITAL) <= errorq_vitalmin)
8547c478bd9Sstevel@tonic-gate 		(void) errorq_panic_drain(0);
8557c478bd9Sstevel@tonic-gate 	(void) errorq_panic_drain(ERRORQ_VITAL | ERRORQ_NVLIST);
8567c478bd9Sstevel@tonic-gate 	(void) errorq_panic_drain(ERRORQ_NVLIST);
8577c478bd9Sstevel@tonic-gate }
8587c478bd9Sstevel@tonic-gate 
8597c478bd9Sstevel@tonic-gate /*
8607c478bd9Sstevel@tonic-gate  * Reserve an error queue element for later processing and dispatching.  The
8617c478bd9Sstevel@tonic-gate  * element is returned to the caller who may add error-specific data to
862267b64d5SStephen Hanson  * element.  The element is returned to the free pool when either
8637c478bd9Sstevel@tonic-gate  * errorq_commit() is called and the element asynchronously processed
8647c478bd9Sstevel@tonic-gate  * or immediately when errorq_cancel() is called.
8657c478bd9Sstevel@tonic-gate  */
8667c478bd9Sstevel@tonic-gate errorq_elem_t *
errorq_reserve(errorq_t * eqp)8677c478bd9Sstevel@tonic-gate errorq_reserve(errorq_t *eqp)
8687c478bd9Sstevel@tonic-gate {
8697c478bd9Sstevel@tonic-gate 	errorq_elem_t *eqep;
8707c478bd9Sstevel@tonic-gate 
8717c478bd9Sstevel@tonic-gate 	if (eqp == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
8721a5e258fSJosef 'Jeff' Sipek 		atomic_inc_64(&errorq_lost);
8737c478bd9Sstevel@tonic-gate 		return (NULL);
8747c478bd9Sstevel@tonic-gate 	}
8757c478bd9Sstevel@tonic-gate 
	/*
	 * Claim a free element, exactly as in errorq_dispatch(): find a
	 * candidate clear bit after the rotor, race for it with an exclusive
	 * atomic set, and retry if another producer wins.
	 */
876267b64d5SStephen Hanson 	for (;;) {
877267b64d5SStephen Hanson 		int i, rval;
8787c478bd9Sstevel@tonic-gate 
879267b64d5SStephen Hanson 		if ((i = errorq_availbit(eqp->eq_bitmap, eqp->eq_qlen,
880267b64d5SStephen Hanson 		    eqp->eq_rotor)) == -1) {
8811a5e258fSJosef 'Jeff' Sipek 			atomic_inc_64(&eqp->eq_kstat.eqk_dropped.value.ui64);
8827c478bd9Sstevel@tonic-gate 			return (NULL);
8837c478bd9Sstevel@tonic-gate 		}
884267b64d5SStephen Hanson 		BT_ATOMIC_SET_EXCL(eqp->eq_bitmap, i, rval);
885267b64d5SStephen Hanson 		if (rval == 0) {
886267b64d5SStephen Hanson 			eqp->eq_rotor = i;
887267b64d5SStephen Hanson 			eqep = &eqp->eq_elems[i];
888267b64d5SStephen Hanson 			break;
889267b64d5SStephen Hanson 		}
890267b64d5SStephen Hanson 	}
8917c478bd9Sstevel@tonic-gate 
	/* For nvlist queues, reset the allocator and hand back a fresh nvlist */
8927c478bd9Sstevel@tonic-gate 	if (eqp->eq_flags & ERRORQ_NVLIST) {
8937c478bd9Sstevel@tonic-gate 		errorq_nvelem_t *eqnp = eqep->eqe_data;
8947c478bd9Sstevel@tonic-gate 		nv_alloc_reset(eqnp->eqn_nva);
8957c478bd9Sstevel@tonic-gate 		eqnp->eqn_nvl = fm_nvlist_create(eqnp->eqn_nva);
8967c478bd9Sstevel@tonic-gate 	}
8977c478bd9Sstevel@tonic-gate 
8981a5e258fSJosef 'Jeff' Sipek 	atomic_inc_64(&eqp->eq_kstat.eqk_reserved.value.ui64);
8997c478bd9Sstevel@tonic-gate 	return (eqep);
9007c478bd9Sstevel@tonic-gate }
9017c478bd9Sstevel@tonic-gate 
9027c478bd9Sstevel@tonic-gate /*
9037c478bd9Sstevel@tonic-gate  * Commit an errorq element (eqep) for dispatching.
9047c478bd9Sstevel@tonic-gate  * This function may be called from any context subject
9057c478bd9Sstevel@tonic-gate  * to the Platform Considerations described above.
9067c478bd9Sstevel@tonic-gate  */
9077c478bd9Sstevel@tonic-gate void
errorq_commit(errorq_t * eqp,errorq_elem_t * eqep,uint_t flag)9087c478bd9Sstevel@tonic-gate errorq_commit(errorq_t *eqp, errorq_elem_t *eqep, uint_t flag)
9097c478bd9Sstevel@tonic-gate {
9107c478bd9Sstevel@tonic-gate 	errorq_elem_t *old;
9117c478bd9Sstevel@tonic-gate 
	/*
	 * NOTE(review): unlike errorq_dispatch(), eqp is dereferenced without
	 * a NULL check here — callers must pass the queue the element was
	 * reserved from.
	 */
9127c478bd9Sstevel@tonic-gate 	if (eqep == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
9131a5e258fSJosef 'Jeff' Sipek 		atomic_inc_64(&eqp->eq_kstat.eqk_commit_fail.value.ui64);
9147c478bd9Sstevel@tonic-gate 		return;
9157c478bd9Sstevel@tonic-gate 	}
9167c478bd9Sstevel@tonic-gate 
	/* Lock-free push onto the pending list, as in errorq_dispatch() */
9177c478bd9Sstevel@tonic-gate 	for (;;) {
9187c478bd9Sstevel@tonic-gate 		old = eqp->eq_pend;
9197c478bd9Sstevel@tonic-gate 		eqep->eqe_prev = old;
9207c478bd9Sstevel@tonic-gate 		membar_producer();
9217c478bd9Sstevel@tonic-gate 
92275d94465SJosef 'Jeff' Sipek 		if (atomic_cas_ptr(&eqp->eq_pend, old, eqep) == old)
9237c478bd9Sstevel@tonic-gate 			break;
9247c478bd9Sstevel@tonic-gate 	}
9257c478bd9Sstevel@tonic-gate 
9261a5e258fSJosef 'Jeff' Sipek 	atomic_inc_64(&eqp->eq_kstat.eqk_committed.value.ui64);
9277c478bd9Sstevel@tonic-gate 
	/* For ASYNC commit, kick the drain soft interrupt if it is set up */
9287c478bd9Sstevel@tonic-gate 	if (flag == ERRORQ_ASYNC && eqp->eq_id != NULL)
9297c478bd9Sstevel@tonic-gate 		ddi_trigger_softintr(eqp->eq_id);
9307c478bd9Sstevel@tonic-gate }
9317c478bd9Sstevel@tonic-gate 
9327c478bd9Sstevel@tonic-gate /*
9337c478bd9Sstevel@tonic-gate  * Cancel an errorq element reservation by returning the specified element
934267b64d5SStephen Hanson  * to the free pool.  Duplicate or invalid frees are not supported.
9357c478bd9Sstevel@tonic-gate  */
9367c478bd9Sstevel@tonic-gate void
errorq_cancel(errorq_t * eqp,errorq_elem_t * eqep)9377c478bd9Sstevel@tonic-gate errorq_cancel(errorq_t *eqp, errorq_elem_t *eqep)
9387c478bd9Sstevel@tonic-gate {
9397c478bd9Sstevel@tonic-gate 	if (eqep == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE))
9407c478bd9Sstevel@tonic-gate 		return;
9417c478bd9Sstevel@tonic-gate 
	/* Clearing the bitmap bit returns the element to the free pool */
942267b64d5SStephen Hanson 	BT_ATOMIC_CLEAR(eqp->eq_bitmap, eqep - eqp->eq_elems);
9437c478bd9Sstevel@tonic-gate 
9441a5e258fSJosef 'Jeff' Sipek 	atomic_inc_64(&eqp->eq_kstat.eqk_cancelled.value.ui64);
9457c478bd9Sstevel@tonic-gate }
9467c478bd9Sstevel@tonic-gate 
9477c478bd9Sstevel@tonic-gate /*
9487c478bd9Sstevel@tonic-gate  * Write elements on the dump list of each nvlist errorq to the dump device.
949*bbf21555SRichard Lowe  * Upon reboot, fmd(8) will extract and replay them for diagnosis.
9507c478bd9Sstevel@tonic-gate  */
9517c478bd9Sstevel@tonic-gate void
errorq_dump(void)9527c478bd9Sstevel@tonic-gate errorq_dump(void)
9537c478bd9Sstevel@tonic-gate {
9547c478bd9Sstevel@tonic-gate 	errorq_elem_t *eep;
9557c478bd9Sstevel@tonic-gate 	errorq_t *eqp;
9567c478bd9Sstevel@tonic-gate 
9577c478bd9Sstevel@tonic-gate 	if (ereport_dumpbuf == NULL)
9587c478bd9Sstevel@tonic-gate 		return; /* reboot or panic before errorq is even set up */
9597c478bd9Sstevel@tonic-gate 
9607c478bd9Sstevel@tonic-gate 	for (eqp = errorq_list; eqp != NULL; eqp = eqp->eq_next) {
9617c478bd9Sstevel@tonic-gate 		if (!(eqp->eq_flags & ERRORQ_NVLIST) ||
9627c478bd9Sstevel@tonic-gate 		    !(eqp->eq_flags & ERRORQ_ACTIVE))
9637c478bd9Sstevel@tonic-gate 			continue; /* do not dump this queue on panic */
9647c478bd9Sstevel@tonic-gate 
		/* Walk the dump list (oldest to newest) built during the panic drain */
9657c478bd9Sstevel@tonic-gate 		for (eep = eqp->eq_dump; eep != NULL; eep = eep->eqe_dump) {
9667c478bd9Sstevel@tonic-gate 			errorq_nvelem_t *eqnp = eep->eqe_data;
9677c478bd9Sstevel@tonic-gate 			size_t len = 0;
9687c478bd9Sstevel@tonic-gate 			erpt_dump_t ed;
9697c478bd9Sstevel@tonic-gate 			int err;
9707c478bd9Sstevel@tonic-gate 
9717c478bd9Sstevel@tonic-gate 			(void) nvlist_size(eqnp->eqn_nvl,
9727c478bd9Sstevel@tonic-gate 			    &len, NV_ENCODE_NATIVE);
9737c478bd9Sstevel@tonic-gate 
			/* Skip (but warn about) reports that won't fit in the buffer */
9747c478bd9Sstevel@tonic-gate 			if (len > ereport_dumplen || len == 0) {
9757c478bd9Sstevel@tonic-gate 				cmn_err(CE_WARN, "%s: unable to save error "
9767c478bd9Sstevel@tonic-gate 				    "report %p due to size %lu\n",
9777c478bd9Sstevel@tonic-gate 				    eqp->eq_name, (void *)eep, len);
9787c478bd9Sstevel@tonic-gate 				continue;
9797c478bd9Sstevel@tonic-gate 			}
9807c478bd9Sstevel@tonic-gate 
			/* Pack the nvlist into the preallocated panic-time buffer */
9817c478bd9Sstevel@tonic-gate 			if ((err = nvlist_pack(eqnp->eqn_nvl,
9827c478bd9Sstevel@tonic-gate 			    (char **)&ereport_dumpbuf, &ereport_dumplen,
9837c478bd9Sstevel@tonic-gate 			    NV_ENCODE_NATIVE, KM_NOSLEEP)) != 0) {
9847c478bd9Sstevel@tonic-gate 				cmn_err(CE_WARN, "%s: unable to save error "
9857c478bd9Sstevel@tonic-gate 				    "report %p due to pack error %d\n",
9867c478bd9Sstevel@tonic-gate 				    eqp->eq_name, (void *)eep, err);
9877c478bd9Sstevel@tonic-gate 				continue;
9887c478bd9Sstevel@tonic-gate 			}
9897c478bd9Sstevel@tonic-gate 
			/* Fill in the header: magic, checksum, size, and panic timestamps */
9907c478bd9Sstevel@tonic-gate 			ed.ed_magic = ERPT_MAGIC;
9917c478bd9Sstevel@tonic-gate 			ed.ed_chksum = checksum32(ereport_dumpbuf, len);
9927c478bd9Sstevel@tonic-gate 			ed.ed_size = (uint32_t)len;
9937c478bd9Sstevel@tonic-gate 			ed.ed_pad = 0;
9947c478bd9Sstevel@tonic-gate 			ed.ed_hrt_nsec = 0;
9957c478bd9Sstevel@tonic-gate 			ed.ed_hrt_base = panic_hrtime;
9967c478bd9Sstevel@tonic-gate 			ed.ed_tod_base.sec = panic_hrestime.tv_sec;
9977c478bd9Sstevel@tonic-gate 			ed.ed_tod_base.nsec = panic_hrestime.tv_nsec;
9987c478bd9Sstevel@tonic-gate 
			/* Write the erpt_dump_t header followed by the packed nvlist */
9997c478bd9Sstevel@tonic-gate 			dumpvp_write(&ed, sizeof (ed));
10007c478bd9Sstevel@tonic-gate 			dumpvp_write(ereport_dumpbuf, len);
10017c478bd9Sstevel@tonic-gate 		}
10027c478bd9Sstevel@tonic-gate 	}
10037c478bd9Sstevel@tonic-gate }
10047c478bd9Sstevel@tonic-gate 
10057c478bd9Sstevel@tonic-gate nvlist_t *
errorq_elem_nvl(errorq_t * eqp,const errorq_elem_t * eqep)10067c478bd9Sstevel@tonic-gate errorq_elem_nvl(errorq_t *eqp, const errorq_elem_t *eqep)
10077c478bd9Sstevel@tonic-gate {
10087c478bd9Sstevel@tonic-gate 	errorq_nvelem_t *eqnp = eqep->eqe_data;
10097c478bd9Sstevel@tonic-gate 
10107c478bd9Sstevel@tonic-gate 	ASSERT(eqp->eq_flags & ERRORQ_ACTIVE && eqp->eq_flags & ERRORQ_NVLIST);
10117c478bd9Sstevel@tonic-gate 
10127c478bd9Sstevel@tonic-gate 	return (eqnp->eqn_nvl);
10137c478bd9Sstevel@tonic-gate }
10147c478bd9Sstevel@tonic-gate 
10157c478bd9Sstevel@tonic-gate nv_alloc_t *
errorq_elem_nva(errorq_t * eqp,const errorq_elem_t * eqep)10167c478bd9Sstevel@tonic-gate errorq_elem_nva(errorq_t *eqp, const errorq_elem_t *eqep)
10177c478bd9Sstevel@tonic-gate {
10187c478bd9Sstevel@tonic-gate 	errorq_nvelem_t *eqnp = eqep->eqe_data;
10197c478bd9Sstevel@tonic-gate 
10207c478bd9Sstevel@tonic-gate 	ASSERT(eqp->eq_flags & ERRORQ_ACTIVE && eqp->eq_flags & ERRORQ_NVLIST);
10217c478bd9Sstevel@tonic-gate 
10227c478bd9Sstevel@tonic-gate 	return (eqnp->eqn_nva);
10237c478bd9Sstevel@tonic-gate }
10247c478bd9Sstevel@tonic-gate 
10257c478bd9Sstevel@tonic-gate /*
10267c478bd9Sstevel@tonic-gate  * Reserve a new element and duplicate the data of the original into it.
10277c478bd9Sstevel@tonic-gate  */
10287c478bd9Sstevel@tonic-gate void *
errorq_elem_dup(errorq_t * eqp,const errorq_elem_t * eqep,errorq_elem_t ** neqep)10297c478bd9Sstevel@tonic-gate errorq_elem_dup(errorq_t *eqp, const errorq_elem_t *eqep, errorq_elem_t **neqep)
10307c478bd9Sstevel@tonic-gate {
10317c478bd9Sstevel@tonic-gate 	ASSERT(eqp->eq_flags & ERRORQ_ACTIVE);
10327c478bd9Sstevel@tonic-gate 	ASSERT(!(eqp->eq_flags & ERRORQ_NVLIST));
10337c478bd9Sstevel@tonic-gate 
10347c478bd9Sstevel@tonic-gate 	if ((*neqep = errorq_reserve(eqp)) == NULL)
10357c478bd9Sstevel@tonic-gate 		return (NULL);
10367c478bd9Sstevel@tonic-gate 
10377c478bd9Sstevel@tonic-gate 	bcopy(eqep->eqe_data, (*neqep)->eqe_data, eqp->eq_size);
10387c478bd9Sstevel@tonic-gate 	return ((*neqep)->eqe_data);
10397c478bd9Sstevel@tonic-gate }
1040