17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5*d624471bSelowe * Common Development and Distribution License (the "License"). 6*d624471bSelowe * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 22*d624471bSelowe * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 237c478bd9Sstevel@tonic-gate * Use is subject to license terms. 
247c478bd9Sstevel@tonic-gate */ 257c478bd9Sstevel@tonic-gate 267c478bd9Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 277c478bd9Sstevel@tonic-gate 287c478bd9Sstevel@tonic-gate /* 297c478bd9Sstevel@tonic-gate * Kernel Error Queues 307c478bd9Sstevel@tonic-gate * 317c478bd9Sstevel@tonic-gate * A common problem when handling hardware error traps and interrupts is that 327c478bd9Sstevel@tonic-gate * these errors frequently must be handled at high interrupt level, where 337c478bd9Sstevel@tonic-gate * reliably producing error messages and safely examining and manipulating 347c478bd9Sstevel@tonic-gate * other kernel state may not be possible. The kernel error queue primitive is 357c478bd9Sstevel@tonic-gate * a common set of routines that allow a subsystem to maintain a queue of 367c478bd9Sstevel@tonic-gate * errors that can be processed by an explicit call from a safe context or by a 377c478bd9Sstevel@tonic-gate * soft interrupt that fires at a specific lower interrupt level. The queue 387c478bd9Sstevel@tonic-gate * management code also ensures that if the system panics, all in-transit 397c478bd9Sstevel@tonic-gate * errors are logged prior to reset. Each queue has an associated kstat for 407c478bd9Sstevel@tonic-gate * observing the number of errors dispatched and logged, and mdb(1) debugging 417c478bd9Sstevel@tonic-gate * support is provided for live and post-mortem observability. 427c478bd9Sstevel@tonic-gate * 437c478bd9Sstevel@tonic-gate * Memory Allocation 447c478bd9Sstevel@tonic-gate * 457c478bd9Sstevel@tonic-gate * All of the queue data structures are allocated in advance as part of 467c478bd9Sstevel@tonic-gate * the errorq_create() call. No additional memory allocations are 477c478bd9Sstevel@tonic-gate * performed as part of errorq_dispatch(), errorq_reserve(), 487c478bd9Sstevel@tonic-gate * errorq_commit() or errorq_drain(). 
This design
 * facilitates reliable error queue processing even when the system is low
 * on memory, and ensures that errorq_dispatch() can be called from any
 * context.  When the queue is created, the maximum queue length is
 * specified as a parameter to errorq_create() or errorq_nvcreate().  This
 * length should represent a reasonable upper bound on the number of
 * simultaneous errors.  If errorq_dispatch() or errorq_reserve() is
 * invoked and no free queue elements are available, the error is
 * dropped and will not be logged.  Typically, the queue will only be
 * exhausted by an error storm, and in this case
 * the earlier errors provide the most important data for analysis.
 * When a new error is dispatched, the error data is copied into the
 * preallocated queue element so that the caller's buffer can be reused.
 *
 * When a new error is reserved, an element is moved from the free list
 * and returned to the caller.  The element buffer data, eqe_data, may be
 * managed by the caller and dispatched to the errorq by calling
 * errorq_commit().  This is useful for additions to errorq's
 * created with errorq_nvcreate() to handle name-value pair (nvpair) data.
 * See below for a discussion on nvlist errorq's.
687c478bd9Sstevel@tonic-gate * 697c478bd9Sstevel@tonic-gate * Queue Drain Callback 707c478bd9Sstevel@tonic-gate * 717c478bd9Sstevel@tonic-gate * When the error queue is drained, the caller's queue drain callback is 727c478bd9Sstevel@tonic-gate * invoked with a pointer to the saved error data. This function may be 737c478bd9Sstevel@tonic-gate * called from passive kernel context or soft interrupt context at or 747c478bd9Sstevel@tonic-gate * below LOCK_LEVEL, or as part of panic(). As such, the callback should 757c478bd9Sstevel@tonic-gate * basically only be calling cmn_err (but NOT with the CE_PANIC flag). 767c478bd9Sstevel@tonic-gate * The callback must not call panic(), attempt to allocate memory, or wait 777c478bd9Sstevel@tonic-gate * on a condition variable. The callback may not call errorq_destroy() 787c478bd9Sstevel@tonic-gate * or errorq_drain() on the same error queue that called it. 797c478bd9Sstevel@tonic-gate * 807c478bd9Sstevel@tonic-gate * The queue drain callback will always be called for each pending error 817c478bd9Sstevel@tonic-gate * in the order in which errors were enqueued (oldest to newest). The 827c478bd9Sstevel@tonic-gate * queue drain callback is guaranteed to provide at *least* once semantics 837c478bd9Sstevel@tonic-gate * for all errors that are successfully dispatched (i.e. for which 847c478bd9Sstevel@tonic-gate * errorq_dispatch() has successfully completed). If an unrelated panic 857c478bd9Sstevel@tonic-gate * occurs while the queue drain callback is running on a vital queue, the 867c478bd9Sstevel@tonic-gate * panic subsystem will continue the queue drain and the callback may be 877c478bd9Sstevel@tonic-gate * invoked again for the same error. Therefore, the callback should 887c478bd9Sstevel@tonic-gate * restrict itself to logging messages and taking other actions that are 897c478bd9Sstevel@tonic-gate * not destructive if repeated. 
 *
 * Name-Value Pair Error Queues
 *
 *	During error handling, it may be more convenient to store error
 *	queue element data as a fixed buffer of name-value pairs.  The
 *	nvpair library allows construction and destruction of nvlists
 *	in pre-allocated memory buffers.
 *
 *	Error queues created via errorq_nvcreate() store queue element
 *	data as fixed buffer nvlists (ereports).  errorq_reserve()
 *	allocates an errorq element from eqp->eq_free and returns a valid
 *	pointer to an errorq_elem_t (queue element) and a pre-allocated
 *	fixed buffer nvlist.  errorq_elem_nvl() is used to gain access
 *	to the nvlist to add name-value ereport members prior to
 *	dispatching the error queue element in errorq_commit().
 *
 *	Once dispatched, the drain function will return the element to
 *	eqp->eq_free and reset the associated nv_alloc structure.
 *	errorq_cancel() may be called to cancel a reservation for an
 *	element that was never dispatched (committed).  This is useful in
 *	cases where a programming error prevents a queue element from being
 *	dispatched.
1127c478bd9Sstevel@tonic-gate * 1137c478bd9Sstevel@tonic-gate * Queue Management 1147c478bd9Sstevel@tonic-gate * 1157c478bd9Sstevel@tonic-gate * The queue element structures and error data buffers are allocated in 1167c478bd9Sstevel@tonic-gate * two contiguous chunks as part of errorq_create() or errorq_nvcreate(). 1177c478bd9Sstevel@tonic-gate * Each queue element structure contains a next pointer, 1187c478bd9Sstevel@tonic-gate * a previous pointer, and a pointer to the corresponding error data 1197c478bd9Sstevel@tonic-gate * buffer. The data buffer for a nvlist errorq is a shared buffer 1207c478bd9Sstevel@tonic-gate * for the allocation of name-value pair lists. The elements are kept on 1217c478bd9Sstevel@tonic-gate * one of three lists: 1227c478bd9Sstevel@tonic-gate * 1237c478bd9Sstevel@tonic-gate * Unused elements are kept on the free list, a singly-linked list pointed 1247c478bd9Sstevel@tonic-gate * to by eqp->eq_free, and linked together using eqe_prev. The eqe_next 1257c478bd9Sstevel@tonic-gate * pointer is not used by the free list and will be set to NULL. 1267c478bd9Sstevel@tonic-gate * 1277c478bd9Sstevel@tonic-gate * Pending errors are kept on the pending list, a singly-linked list 1287c478bd9Sstevel@tonic-gate * pointed to by eqp->eq_pend, and linked together using eqe_prev. This 1297c478bd9Sstevel@tonic-gate * list is maintained in order from newest error to oldest. The eqe_next 1307c478bd9Sstevel@tonic-gate * pointer is not used by the pending list and will be set to NULL. 1317c478bd9Sstevel@tonic-gate * 1327c478bd9Sstevel@tonic-gate * The processing list is a doubly-linked list pointed to by eqp->eq_phead 1337c478bd9Sstevel@tonic-gate * (the oldest element) and eqp->eq_ptail (the newest element). The 1347c478bd9Sstevel@tonic-gate * eqe_next pointer is used to traverse from eq_phead to eq_ptail, and the 1357c478bd9Sstevel@tonic-gate * eqe_prev pointer is used to traverse from eq_ptail to eq_phead. 
Once a 1367c478bd9Sstevel@tonic-gate * queue drain operation begins, the current pending list is moved to the 1377c478bd9Sstevel@tonic-gate * processing list in a two-phase commit fashion, allowing the panic code 1387c478bd9Sstevel@tonic-gate * to always locate and process all pending errors in the event that a 1397c478bd9Sstevel@tonic-gate * panic occurs in the middle of queue processing. 1407c478bd9Sstevel@tonic-gate * 1417c478bd9Sstevel@tonic-gate * A fourth list is maintained for nvlist errorqs. The dump list, 1427c478bd9Sstevel@tonic-gate * eq_dump is used to link all errorq elements that should be stored 1437c478bd9Sstevel@tonic-gate * in a crash dump file in the event of a system panic. During 1447c478bd9Sstevel@tonic-gate * errorq_panic(), the list is created and subsequently traversed 1457c478bd9Sstevel@tonic-gate * in errorq_dump() during the final phases of a crash dump. 1467c478bd9Sstevel@tonic-gate * 1477c478bd9Sstevel@tonic-gate * Platform Considerations 1487c478bd9Sstevel@tonic-gate * 1497c478bd9Sstevel@tonic-gate * In order to simplify their implementation, error queues make use of the 1507c478bd9Sstevel@tonic-gate * C wrappers for compare-and-swap. If the platform itself does not 1517c478bd9Sstevel@tonic-gate * support compare-and-swap in hardware and the kernel emulation routines 1527c478bd9Sstevel@tonic-gate * are used instead, then the context in which errorq_dispatch() can be 1537c478bd9Sstevel@tonic-gate * safely invoked is further constrained by the implementation of the 1547c478bd9Sstevel@tonic-gate * compare-and-swap emulation. Specifically, if errorq_dispatch() is 1557c478bd9Sstevel@tonic-gate * called from a code path that can be executed above ATOMIC_LEVEL on such 1567c478bd9Sstevel@tonic-gate * a platform, the dispatch code could potentially deadlock unless the 1577c478bd9Sstevel@tonic-gate * corresponding error interrupt is blocked or disabled prior to calling 1587c478bd9Sstevel@tonic-gate * errorq_dispatch(). 
Error queues should therefore be deployed with 1597c478bd9Sstevel@tonic-gate * caution on these platforms. 1607c478bd9Sstevel@tonic-gate * 1617c478bd9Sstevel@tonic-gate * Interfaces 1627c478bd9Sstevel@tonic-gate * 1637c478bd9Sstevel@tonic-gate * errorq_t *errorq_create(name, func, private, qlen, eltsize, ipl, flags); 1647c478bd9Sstevel@tonic-gate * errorq_t *errorq_nvcreate(name, func, private, qlen, eltsize, ipl, flags); 1657c478bd9Sstevel@tonic-gate * 1667c478bd9Sstevel@tonic-gate * Create a new error queue with the specified name, callback, and 1677c478bd9Sstevel@tonic-gate * properties. A pointer to the new error queue is returned upon success, 1687c478bd9Sstevel@tonic-gate * or NULL is returned to indicate that the queue could not be created. 1697c478bd9Sstevel@tonic-gate * This function must be called from passive kernel context with no locks 1707c478bd9Sstevel@tonic-gate * held that can prevent a sleeping memory allocation from occurring. 1717c478bd9Sstevel@tonic-gate * errorq_create() will return failure if the queue kstats cannot be 1727c478bd9Sstevel@tonic-gate * created, or if a soft interrupt handler cannot be registered. 1737c478bd9Sstevel@tonic-gate * 1747c478bd9Sstevel@tonic-gate * The queue 'name' is a string that is recorded for live and post-mortem 1757c478bd9Sstevel@tonic-gate * examination by a debugger. The queue callback 'func' will be invoked 1767c478bd9Sstevel@tonic-gate * for each error drained from the queue, and will receive the 'private' 1777c478bd9Sstevel@tonic-gate * pointer as its first argument. The callback must obey the rules for 1787c478bd9Sstevel@tonic-gate * callbacks described above. The queue will have maximum length 'qlen' 1797c478bd9Sstevel@tonic-gate * and each element will be able to record up to 'eltsize' bytes of data. 1807c478bd9Sstevel@tonic-gate * The queue's soft interrupt (see errorq_dispatch(), below) will fire 1817c478bd9Sstevel@tonic-gate * at 'ipl', which should not exceed LOCK_LEVEL. 
The queue 'flags' may 1827c478bd9Sstevel@tonic-gate * include the following flag: 1837c478bd9Sstevel@tonic-gate * 1847c478bd9Sstevel@tonic-gate * ERRORQ_VITAL - This queue contains information that is considered 1857c478bd9Sstevel@tonic-gate * vital to problem diagnosis. Error queues that are marked vital will 1867c478bd9Sstevel@tonic-gate * be automatically drained by the panic subsystem prior to printing 1877c478bd9Sstevel@tonic-gate * the panic messages to the console. 1887c478bd9Sstevel@tonic-gate * 1897c478bd9Sstevel@tonic-gate * void errorq_destroy(errorq); 1907c478bd9Sstevel@tonic-gate * 1917c478bd9Sstevel@tonic-gate * Destroy the specified error queue. The queue is drained of any 1927c478bd9Sstevel@tonic-gate * pending elements and these are logged before errorq_destroy returns. 1937c478bd9Sstevel@tonic-gate * Once errorq_destroy() begins draining the queue, any simultaneous 1947c478bd9Sstevel@tonic-gate * calls to dispatch errors will result in the errors being dropped. 1957c478bd9Sstevel@tonic-gate * The caller must invoke a higher-level abstraction (e.g. disabling 1967c478bd9Sstevel@tonic-gate * an error interrupt) to ensure that error handling code does not 1977c478bd9Sstevel@tonic-gate * attempt to dispatch errors to the queue while it is being freed. 1987c478bd9Sstevel@tonic-gate * 1997c478bd9Sstevel@tonic-gate * void errorq_dispatch(errorq, data, len, flag); 2007c478bd9Sstevel@tonic-gate * 2017c478bd9Sstevel@tonic-gate * Attempt to enqueue the specified error data. If a free queue element 2027c478bd9Sstevel@tonic-gate * is available, the data is copied into a free element and placed on a 2037c478bd9Sstevel@tonic-gate * pending list. If no free queue element is available, the error is 2047c478bd9Sstevel@tonic-gate * dropped. The data length (len) is specified in bytes and should not 2057c478bd9Sstevel@tonic-gate * exceed the queue's maximum element size. 
If the data length is less 2067c478bd9Sstevel@tonic-gate * than the maximum element size, the remainder of the queue element is 2077c478bd9Sstevel@tonic-gate * filled with zeroes. The flag parameter should be one of: 2087c478bd9Sstevel@tonic-gate * 2097c478bd9Sstevel@tonic-gate * ERRORQ_ASYNC - Schedule a soft interrupt at the previously specified 2107c478bd9Sstevel@tonic-gate * IPL to asynchronously drain the queue on behalf of the caller. 2117c478bd9Sstevel@tonic-gate * 2127c478bd9Sstevel@tonic-gate * ERRORQ_SYNC - Do not schedule a soft interrupt to drain the queue. 2137c478bd9Sstevel@tonic-gate * The caller is presumed to be calling errorq_drain() or panic() in 2147c478bd9Sstevel@tonic-gate * the near future in order to drain the queue and log the error. 2157c478bd9Sstevel@tonic-gate * 2167c478bd9Sstevel@tonic-gate * The errorq_dispatch() function may be called from any context, subject 2177c478bd9Sstevel@tonic-gate * to the Platform Considerations described above. 2187c478bd9Sstevel@tonic-gate * 2197c478bd9Sstevel@tonic-gate * void errorq_drain(errorq); 2207c478bd9Sstevel@tonic-gate * 2217c478bd9Sstevel@tonic-gate * Drain the error queue of all pending errors. The queue's callback 2227c478bd9Sstevel@tonic-gate * function is invoked for each error in order from oldest to newest. 2237c478bd9Sstevel@tonic-gate * This function may be used at or below LOCK_LEVEL or from panic context. 2247c478bd9Sstevel@tonic-gate * 2257c478bd9Sstevel@tonic-gate * errorq_elem_t *errorq_reserve(errorq); 2267c478bd9Sstevel@tonic-gate * 2277c478bd9Sstevel@tonic-gate * Reserve an error queue element for later processing and dispatching. 2287c478bd9Sstevel@tonic-gate * The element is returned to the caller who may add error-specific data 2297c478bd9Sstevel@tonic-gate * to element. 
The element is returned to the free list when either
 *	errorq_commit() is called and the element asynchronously processed
 *	or immediately when errorq_cancel() is called.
 *
 * void errorq_commit(errorq, errorq_elem, flag);
 *
 *	Commit an errorq element (eqep) for dispatching, see
 *	errorq_dispatch().
 *
 * void errorq_cancel(errorq, errorq_elem);
 *
 *	Cancel a pending errorq element reservation.  The errorq element is
 *	returned to the free list upon cancelation.
 */

#include <sys/errorq_impl.h>
#include <sys/sysmacros.h>
#include <sys/machlock.h>
#include <sys/cmn_err.h>
#include <sys/atomic.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/bootconf.h>
#include <sys/spl.h>
#include <sys/dumphdr.h>
#include <sys/compress.h>
#include <sys/time.h>
#include <sys/panic.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>

/*
 * Template for the named kstat counters published for every queue; the
 * entries must match the member order of struct errorq_kstat.
 */
static struct errorq_kstat errorq_kstat_template = {
	{ "dispatched", KSTAT_DATA_UINT64 },
	{ "dropped", KSTAT_DATA_UINT64 },
	{ "logged", KSTAT_DATA_UINT64 },
	{ "reserved", KSTAT_DATA_UINT64 },
	{ "reserve_fail", KSTAT_DATA_UINT64 },
	{ "committed", KSTAT_DATA_UINT64 },
	{ "commit_fail", KSTAT_DATA_UINT64 },
	{ "cancelled", KSTAT_DATA_UINT64 }
};

static uint64_t errorq_lost = 0;	/* errors dropped: queue NULL or inactive */
static errorq_t *errorq_list = NULL;	/* global queue list, errorq_lock-protected */
static kmutex_t errorq_lock;		/* protects errorq_list */
static uint64_t errorq_vitalmin = 5;

/*
 * Soft interrupt handler registered for each queue: the softint argument is
 * the queue itself, which we simply drain.
 */
static uint_t
errorq_intr(caddr_t eqp)
{
	errorq_drain((errorq_t *)eqp);
	return (DDI_INTR_CLAIMED);
}

/*
 * Create a new error queue with the specified properties and add a software
 * interrupt handler and kstat for it.  This function must be called from
 * passive kernel context with no locks held that can prevent a sleeping
 * memory allocation from occurring.  This function will return NULL if the
 * softint or kstat for this queue cannot be created.
 */
errorq_t *
errorq_create(const char *name, errorq_func_t func, void *private,
    ulong_t qlen, size_t size, uint_t ipl, uint_t flags)
{
	errorq_t *eqp = kmem_alloc(sizeof (errorq_t), KM_SLEEP);
	ddi_iblock_cookie_t ibc = (ddi_iblock_cookie_t)(uintptr_t)ipltospl(ipl);
	dev_info_t *dip = ddi_root_node();

	errorq_elem_t *eep;
	ddi_softintr_t id = NULL;
	caddr_t data;

	ASSERT(qlen != 0 && size != 0);
	ASSERT(ipl > 0 && ipl <= LOCK_LEVEL);

	/*
	 * If a queue is created very early in boot before device tree services
	 * are available, the queue softint handler cannot be created.  We
	 * manually drain these queues and create their softint handlers when
	 * it is safe to do so as part of errorq_init(), below.
	 */
	if (modrootloaded && ddi_add_softintr(dip, DDI_SOFTINT_FIXED, &id,
	    &ibc, NULL, errorq_intr, (caddr_t)eqp) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "errorq_create: failed to register "
		    "IPL %u softint for queue %s", ipl, name);
		kmem_free(eqp, sizeof (errorq_t));
		return (NULL);
	}

	if ((eqp->eq_ksp = kstat_create("unix", 0, name, "errorq",
	    KSTAT_TYPE_NAMED, sizeof (struct errorq_kstat) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) == NULL) {
		cmn_err(CE_WARN, "errorq_create: failed to create kstat "
		    "for queue %s", name);
		/* undo the softint registration before bailing out */
		if (id != NULL)
			ddi_remove_softintr(id);
		kmem_free(eqp, sizeof (errorq_t));
		return (NULL);
	}

	bcopy(&errorq_kstat_template, &eqp->eq_kstat,
	    sizeof (struct errorq_kstat));
	eqp->eq_ksp->ks_data = &eqp->eq_kstat;
	eqp->eq_ksp->ks_private = eqp;
	kstat_install(eqp->eq_ksp);

	/* eq_name is sized ERRORQ_NAMELEN + 1; guarantee NUL termination */
	(void) strncpy(eqp->eq_name, name, ERRORQ_NAMELEN);
	eqp->eq_name[ERRORQ_NAMELEN] = '\0';
	eqp->eq_func = func;
	eqp->eq_private = private;
	eqp->eq_data = kmem_alloc(qlen * size, KM_SLEEP);
	eqp->eq_qlen = qlen;
	eqp->eq_size = size;
	eqp->eq_ipl = ipl;
	eqp->eq_flags = flags | ERRORQ_ACTIVE;
	eqp->eq_id = id;
	mutex_init(&eqp->eq_lock, NULL, MUTEX_DEFAULT, NULL);
	eqp->eq_elems = kmem_alloc(qlen * sizeof (errorq_elem_t), KM_SLEEP);
	eqp->eq_phead = NULL;
	eqp->eq_ptail = NULL;
	eqp->eq_pend = NULL;
	eqp->eq_dump = NULL;
	eqp->eq_free = eqp->eq_elems;

	/*
	 * Iterate over the array of errorq_elem_t structures and place each
	 * one on the free list and set its data pointer.  The free list is
	 * singly-linked through eqe_prev; the loop chains the first qlen - 1
	 * elements and the final element is terminated separately below.
	 */
	for (eep = eqp->eq_free, data = eqp->eq_data; qlen > 1; qlen--) {
		eep->eqe_next = NULL;
		eep->eqe_dump = NULL;
		eep->eqe_prev = eep + 1;
		eep->eqe_data = data;
		data += size;
		eep++;
	}

	/* last element: end of the free list */
	eep->eqe_next = NULL;
	eep->eqe_prev = NULL;
	eep->eqe_data = data;
	eep->eqe_dump = NULL;

	/*
	 * Once the errorq is initialized, add it to the global list of queues,
	 * and then return a pointer to the new queue to the caller.
	 */
	mutex_enter(&errorq_lock);
	eqp->eq_next = errorq_list;
	errorq_list = eqp;
	mutex_exit(&errorq_lock);

	return (eqp);
}

/*
 * Create a new errorq as if by errorq_create(), but set the ERRORQ_NVLIST
 * flag and initialize each element to have the start of its data region used
 * as an errorq_nvelem_t with a nvlist allocator that consumes the data region.
 */
errorq_t *
errorq_nvcreate(const char *name, errorq_func_t func, void *private,
    ulong_t qlen, size_t size, uint_t ipl, uint_t flags)
{
	errorq_t *eqp;
	errorq_elem_t *eep;

	/* each element needs room for the errorq_nvelem_t header plus data */
	eqp = errorq_create(name, func, private, qlen,
	    size + sizeof (errorq_nvelem_t), ipl, flags | ERRORQ_NVLIST);

	if (eqp == NULL)
		return (NULL);

	mutex_enter(&eqp->eq_lock);

	/*
	 * Carve each element's data region into an errorq_nvelem_t header
	 * followed by a fixed nvlist buffer managed by its own nv_alloc.
	 */
	for (eep = eqp->eq_elems; qlen != 0; eep++, qlen--) {
		errorq_nvelem_t *eqnp = eep->eqe_data;
		eqnp->eqn_buf = (char *)eqnp + sizeof (errorq_nvelem_t);
		eqnp->eqn_nva = fm_nva_xcreate(eqnp->eqn_buf, size);
	}

	mutex_exit(&eqp->eq_lock);
	return (eqp);
}

/*
 * To destroy an error queue, we mark it as disabled and then explicitly drain
 * all pending errors.  Once the drain is complete, we can remove the queue
 * from the global list of queues examined by errorq_panic(), and then free
 * the various queue data structures.  The caller must use some higher-level
 * abstraction (e.g. disabling an error interrupt) to ensure that no one will
 * attempt to enqueue new errors while we are freeing this queue.
 */
void
errorq_destroy(errorq_t *eqp)
{
	errorq_t *p, **pp;
	errorq_elem_t *eep;
	ulong_t i;

	ASSERT(eqp != NULL);
	eqp->eq_flags &= ~ERRORQ_ACTIVE;
	errorq_drain(eqp);

	/* unlink eqp from the global queue list */
	mutex_enter(&errorq_lock);
	pp = &errorq_list;

	for (p = errorq_list; p != NULL; p = p->eq_next) {
		if (p == eqp) {
			*pp = p->eq_next;
			break;
		}
		pp = &p->eq_next;
	}

	mutex_exit(&errorq_lock);
	ASSERT(p != NULL);	/* eqp must have been on the list */

	/* nvlist queues: tear down each element's nv_alloc before freeing */
	if (eqp->eq_flags & ERRORQ_NVLIST) {
		for (eep = eqp->eq_elems, i = 0; i < eqp->eq_qlen; i++, eep++) {
			errorq_nvelem_t *eqnp = eep->eqe_data;
			fm_nva_xdestroy(eqnp->eqn_nva);
		}
	}

	mutex_destroy(&eqp->eq_lock);
	kstat_delete(eqp->eq_ksp);

	/* softint may not exist for queues created before modrootloaded */
	if (eqp->eq_id != NULL)
		ddi_remove_softintr(eqp->eq_id);

	kmem_free(eqp->eq_elems, eqp->eq_qlen * sizeof (errorq_elem_t));
	kmem_free(eqp->eq_data, eqp->eq_qlen * eqp->eq_size);

	kmem_free(eqp, sizeof (errorq_t));
}

/*
 * Dispatch a new error into the queue for later processing.  The specified
 * data buffer is copied into a preallocated queue element.  If 'len' is
 * smaller than the queue element size, the remainder of the queue element is
 * filled with zeroes.  This function may be called from any context subject
 * to the Platform Considerations described above.
 */
void
errorq_dispatch(errorq_t *eqp, const void *data, size_t len, uint_t flag)
{
	errorq_elem_t *eep, *old;

	if (eqp == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
		atomic_add_64(&errorq_lost, 1);
		return;	/* drop error if queue is uninitialized or disabled */
	}

	/*
	 * Lock-free pop of an element from the free list: producers never
	 * take eq_lock and synchronize with consumers using compare-and-swap
	 * alone, so this path is safe at high interrupt level.
	 */
	while ((eep = eqp->eq_free) != NULL) {
		if (casptr(&eqp->eq_free, eep, eep->eqe_prev) == eep)
			break;
	}

	if (eep == NULL) {
		atomic_add_64(&eqp->eq_kstat.eqk_dropped.value.ui64, 1);
		return;
	}

	ASSERT(len <= eqp->eq_size);
	bcopy(data, eep->eqe_data, MIN(eqp->eq_size, len));

	/* Zero-fill the tail so short dispatches never leak stale data. */
	if (len < eqp->eq_size)
		bzero((caddr_t)eep->eqe_data + len, eqp->eq_size - len);

	/*
	 * Push the filled element onto the pending list.  The
	 * membar_producer() guarantees eqe_prev is globally visible before
	 * the element itself is published via casptr.
	 */
	for (;;) {
		old = eqp->eq_pend;
		eep->eqe_prev = old;
		membar_producer();

		if (casptr(&eqp->eq_pend, old, eep) == old)
			break;
	}

	atomic_add_64(&eqp->eq_kstat.eqk_dispatched.value.ui64, 1);

	if (flag == ERRORQ_ASYNC && eqp->eq_id != NULL)
		ddi_trigger_softintr(eqp->eq_id);
}

/*
 * Drain the specified error queue by calling eq_func() for each pending error.
 * This function must be called at or below LOCK_LEVEL or from panic context.
 * In order to synchronize with other attempts to drain the queue, we acquire
 * the adaptive eq_lock, blocking other consumers.  Once this lock is held,
 * we must use compare-and-swap to move the pending list to the processing
 * list and to return elements to the free list in order to synchronize
 * with producers, who do not acquire any locks and only use compare-and-swap.
 *
 * An additional constraint on this function is that if the system panics
 * while this function is running, the panic code must be able to detect and
 * handle all intermediate states and correctly dequeue all errors.  The
 * errorq_panic() function below will be used for detecting and handling
 * these intermediate states.  The comments in errorq_drain() below explain
 * how we make sure each intermediate state is distinct and consistent.
 */
void
errorq_drain(errorq_t *eqp)
{
	errorq_elem_t *eep, *fep, *dep;

	ASSERT(eqp != NULL);
	mutex_enter(&eqp->eq_lock);

	/*
	 * If there are one or more pending errors, set eq_ptail to point to
	 * the first element on the pending list and then attempt to compare-
	 * and-swap NULL to the pending list.  We use membar_producer() to
	 * make sure that eq_ptail will be visible to errorq_panic() below
	 * before the pending list is NULLed out.  This section is labeled
	 * case (1) for errorq_panic, below.  If eq_ptail is not yet set (1A)
	 * eq_pend has all the pending errors.  If casptr fails or has not
	 * been called yet (1B), eq_pend still has all the pending errors.
	 * If casptr succeeds (1C), eq_ptail has all the pending errors.
	 */
	while ((eep = eqp->eq_pend) != NULL) {
		eqp->eq_ptail = eep;
		membar_producer();

		if (casptr(&eqp->eq_pend, eep, NULL) == eep)
			break;
	}

	/*
	 * If no errors were pending, assert that eq_ptail is set to NULL,
	 * drop the consumer lock, and return without doing anything.
	 */
	if (eep == NULL) {
		ASSERT(eqp->eq_ptail == NULL);
		mutex_exit(&eqp->eq_lock);
		return;
	}

	/*
	 * Now iterate from eq_ptail (a.k.a. eep, the newest error) to the
	 * oldest error, setting the eqe_next pointer so that we can iterate
	 * over the errors from oldest to newest.  We use membar_producer()
	 * to make sure that these stores are visible before we set eq_phead.
	 * If we panic before, during, or just after this loop (case 2),
	 * errorq_panic() will simply redo this work, as described below.
	 */
	for (eep->eqe_next = NULL; eep->eqe_prev != NULL; eep = eep->eqe_prev)
		eep->eqe_prev->eqe_next = eep;
	membar_producer();

	/*
	 * Now set eq_phead to the head of the processing list (the oldest
	 * error) and issue another membar_producer() to make sure that
	 * eq_phead is seen as non-NULL before we clear eq_ptail.  If we panic
	 * after eq_phead is set (case 3), we will detect and log these errors
	 * in errorq_panic(), as described below.
	 */
	eqp->eq_phead = eep;
	membar_producer();

	eqp->eq_ptail = NULL;
	membar_producer();

	/*
	 * If we enter from errorq_panic_drain(), we may already have
	 * errorq elements on the dump list.  Find the tail of
	 * the list ready for append.
	 */
	if (panicstr && (dep = eqp->eq_dump) != NULL) {
		while (dep->eqe_dump != NULL)
			dep = dep->eqe_dump;
	}

	/*
	 * Now iterate over the processing list from oldest (eq_phead) to
	 * newest and log each error.  Once an error is logged, we use
	 * compare-and-swap to return it to the free list.  If we panic before,
	 * during, or after calling eq_func() (case 4), the error will still be
	 * found on eq_phead and will be logged in errorq_panic below.
	 */
	while ((eep = eqp->eq_phead) != NULL) {
		eqp->eq_func(eqp->eq_private, eep->eqe_data, eep);
		eqp->eq_kstat.eqk_logged.value.ui64++;

		eqp->eq_phead = eep->eqe_next;
		membar_producer();

		eep->eqe_next = NULL;

		/* Lock-free push back onto the free list. */
		for (;;) {
			fep = eqp->eq_free;
			eep->eqe_prev = fep;
			membar_producer();

			if (casptr(&eqp->eq_free, fep, eep) == fep)
				break;
		}

		/*
		 * On panic, we add the element to the dump list for each
		 * nvlist errorq.  Elements are stored oldest to newest.
		 */
		if (panicstr && (eqp->eq_flags & ERRORQ_NVLIST)) {
			if (eqp->eq_dump == NULL)
				dep = eqp->eq_dump = eep;
			else
				dep = dep->eqe_dump = eep;
			membar_producer();
		}
	}

	mutex_exit(&eqp->eq_lock);
}

/*
 * Now that device tree services are available, set up the soft interrupt
 * handlers for any queues that were created early in boot.
We then 6507c478bd9Sstevel@tonic-gate * manually drain these queues to report any pending early errors. 6517c478bd9Sstevel@tonic-gate */ 6527c478bd9Sstevel@tonic-gate void 6537c478bd9Sstevel@tonic-gate errorq_init(void) 6547c478bd9Sstevel@tonic-gate { 6557c478bd9Sstevel@tonic-gate dev_info_t *dip = ddi_root_node(); 6567c478bd9Sstevel@tonic-gate ddi_softintr_t id; 6577c478bd9Sstevel@tonic-gate errorq_t *eqp; 6587c478bd9Sstevel@tonic-gate 6597c478bd9Sstevel@tonic-gate ASSERT(modrootloaded != 0); 6607c478bd9Sstevel@tonic-gate ASSERT(dip != NULL); 6617c478bd9Sstevel@tonic-gate 6627c478bd9Sstevel@tonic-gate mutex_enter(&errorq_lock); 6637c478bd9Sstevel@tonic-gate 6647c478bd9Sstevel@tonic-gate for (eqp = errorq_list; eqp != NULL; eqp = eqp->eq_next) { 6657c478bd9Sstevel@tonic-gate ddi_iblock_cookie_t ibc = 6667c478bd9Sstevel@tonic-gate (ddi_iblock_cookie_t)(uintptr_t)ipltospl(eqp->eq_ipl); 6677c478bd9Sstevel@tonic-gate 6687c478bd9Sstevel@tonic-gate if (eqp->eq_id != NULL) 6697c478bd9Sstevel@tonic-gate continue; /* softint already initialized */ 6707c478bd9Sstevel@tonic-gate 6717c478bd9Sstevel@tonic-gate if (ddi_add_softintr(dip, DDI_SOFTINT_FIXED, &id, &ibc, NULL, 6727c478bd9Sstevel@tonic-gate errorq_intr, (caddr_t)eqp) != DDI_SUCCESS) { 6737c478bd9Sstevel@tonic-gate panic("errorq_init: failed to register IPL %u softint " 6747c478bd9Sstevel@tonic-gate "for queue %s", eqp->eq_ipl, eqp->eq_name); 6757c478bd9Sstevel@tonic-gate } 6767c478bd9Sstevel@tonic-gate 6777c478bd9Sstevel@tonic-gate eqp->eq_id = id; 6787c478bd9Sstevel@tonic-gate errorq_drain(eqp); 6797c478bd9Sstevel@tonic-gate } 6807c478bd9Sstevel@tonic-gate 6817c478bd9Sstevel@tonic-gate mutex_exit(&errorq_lock); 6827c478bd9Sstevel@tonic-gate } 6837c478bd9Sstevel@tonic-gate 6847c478bd9Sstevel@tonic-gate /* 6857c478bd9Sstevel@tonic-gate * This function is designed to be called from panic context only, and 6867c478bd9Sstevel@tonic-gate * therefore does not need to acquire errorq_lock when iterating over 
 * errorq_list.  This function must be called no more than once for each
 * 'what' value (if you change this then review the manipulation of 'dep').
 */
static uint64_t
errorq_panic_drain(uint_t what)
{
	errorq_elem_t *eep, *nep, *fep, *dep;
	errorq_t *eqp;
	uint64_t loggedtmp;
	uint64_t logged = 0;	/* total errors logged on this pass */

	for (eqp = errorq_list; eqp != NULL; eqp = eqp->eq_next) {
		if ((eqp->eq_flags & (ERRORQ_VITAL | ERRORQ_NVLIST)) != what)
			continue; /* do not drain this queue on this pass */

		loggedtmp = eqp->eq_kstat.eqk_logged.value.ui64;

		/*
		 * In case (1B) above, eq_ptail may be set but the casptr may
		 * not have been executed yet or may have failed.  Either way,
		 * we must log errors in chronological order.  So we search
		 * the pending list for the error pointed to by eq_ptail.  If
		 * it is found, we know that all subsequent errors are also
		 * still on the pending list, so just NULL out eq_ptail and let
		 * errorq_drain(), below, take care of the logging.
		 */
		for (eep = eqp->eq_pend; eep != NULL; eep = eep->eqe_prev) {
			if (eep == eqp->eq_ptail) {
				ASSERT(eqp->eq_phead == NULL);
				eqp->eq_ptail = NULL;
				break;
			}
		}

		/*
		 * In cases (1C) and (2) above, eq_ptail will be set to the
		 * newest error on the processing list but eq_phead will still
		 * be NULL.  We set the eqe_next pointers so we can iterate
		 * over the processing list in order from oldest error to the
		 * newest error.  We then set eq_phead to point to the oldest
		 * error and fall into the for-loop below.
		 */
		if (eqp->eq_phead == NULL && (eep = eqp->eq_ptail) != NULL) {
			for (eep->eqe_next = NULL; eep->eqe_prev != NULL;
			    eep = eep->eqe_prev)
				eep->eqe_prev->eqe_next = eep;

			eqp->eq_phead = eep;
			eqp->eq_ptail = NULL;
		}

		/*
		 * In cases (3) and (4) above (or after case (1C/2) handling),
		 * eq_phead will be set to the oldest error on the processing
		 * list.  We log each error and return it to the free list.
		 *
		 * Unlike errorq_drain(), we don't need to worry about updating
		 * eq_phead because errorq_panic() will be called at most once.
		 * However, we must use casptr to update the freelist in case
		 * errors are still being enqueued during panic.
		 */
		for (eep = eqp->eq_phead; eep != NULL; eep = nep) {
			eqp->eq_func(eqp->eq_private, eep->eqe_data, eep);
			eqp->eq_kstat.eqk_logged.value.ui64++;

			nep = eep->eqe_next;
			eep->eqe_next = NULL;

			for (;;) {
				fep = eqp->eq_free;
				eep->eqe_prev = fep;
				membar_producer();

				if (casptr(&eqp->eq_free, fep, eep) == fep)
					break;
			}

			/*
			 * On panic, we add the element to the dump list for
			 * each nvlist errorq, stored oldest to newest.
			 */
			if (eqp->eq_flags & ERRORQ_NVLIST) {
				if (eqp->eq_dump == NULL)
					dep = eqp->eq_dump = eep;
				else
					dep = dep->eqe_dump = eep;
				membar_producer();
			}
		}

		/*
		 * Now go ahead and drain any other errors on the pending list.
		 * This call transparently handles case (1A) above, as well as
		 * any other errors that were dispatched after errorq_drain()
		 * completed its first compare-and-swap.
		 */
		errorq_drain(eqp);

		logged += eqp->eq_kstat.eqk_logged.value.ui64 - loggedtmp;
	}
	return (logged);
}

/*
 * Drain all error queues - called only from panic context.  Some drain
 * functions may enqueue errors to ERRORQ_NVLIST error queues so that
 * they may be written out in the panic dump - so ERRORQ_NVLIST queues
 * must be drained last.  Drain ERRORQ_VITAL queues before nonvital queues
 * so that vital errors get to fill the ERRORQ_NVLIST queues first, and
 * do not drain the nonvital queues if there are many vital errors.
 */
void
errorq_panic(void)
{
	ASSERT(panicstr != NULL);

	/* Skip nonvital queues if too many vital errors were logged. */
	if (errorq_panic_drain(ERRORQ_VITAL) <= errorq_vitalmin)
		(void) errorq_panic_drain(0);
	(void) errorq_panic_drain(ERRORQ_VITAL | ERRORQ_NVLIST);
	(void) errorq_panic_drain(ERRORQ_NVLIST);
}

/*
 * Reserve an error queue element for later processing and dispatching.
The 8117c478bd9Sstevel@tonic-gate * element is returned to the caller who may add error-specific data to 8127c478bd9Sstevel@tonic-gate * element. The element is retured to the free list when either 8137c478bd9Sstevel@tonic-gate * errorq_commit() is called and the element asynchronously processed 8147c478bd9Sstevel@tonic-gate * or immediately when errorq_cancel() is called. 8157c478bd9Sstevel@tonic-gate */ 8167c478bd9Sstevel@tonic-gate errorq_elem_t * 8177c478bd9Sstevel@tonic-gate errorq_reserve(errorq_t *eqp) 8187c478bd9Sstevel@tonic-gate { 8197c478bd9Sstevel@tonic-gate errorq_elem_t *eqep; 8207c478bd9Sstevel@tonic-gate 8217c478bd9Sstevel@tonic-gate if (eqp == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) { 8227c478bd9Sstevel@tonic-gate atomic_add_64(&errorq_lost, 1); 8237c478bd9Sstevel@tonic-gate return (NULL); 8247c478bd9Sstevel@tonic-gate } 8257c478bd9Sstevel@tonic-gate 8267c478bd9Sstevel@tonic-gate while ((eqep = eqp->eq_free) != NULL) { 8277c478bd9Sstevel@tonic-gate if (casptr(&eqp->eq_free, eqep, eqep->eqe_prev) == eqep) 8287c478bd9Sstevel@tonic-gate break; 8297c478bd9Sstevel@tonic-gate } 8307c478bd9Sstevel@tonic-gate 8317c478bd9Sstevel@tonic-gate if (eqep == NULL) { 8327c478bd9Sstevel@tonic-gate atomic_add_64(&eqp->eq_kstat.eqk_dropped.value.ui64, 1); 8337c478bd9Sstevel@tonic-gate return (NULL); 8347c478bd9Sstevel@tonic-gate } 8357c478bd9Sstevel@tonic-gate 8367c478bd9Sstevel@tonic-gate if (eqp->eq_flags & ERRORQ_NVLIST) { 8377c478bd9Sstevel@tonic-gate errorq_nvelem_t *eqnp = eqep->eqe_data; 8387c478bd9Sstevel@tonic-gate nv_alloc_reset(eqnp->eqn_nva); 8397c478bd9Sstevel@tonic-gate eqnp->eqn_nvl = fm_nvlist_create(eqnp->eqn_nva); 8407c478bd9Sstevel@tonic-gate } 8417c478bd9Sstevel@tonic-gate 8427c478bd9Sstevel@tonic-gate atomic_add_64(&eqp->eq_kstat.eqk_reserved.value.ui64, 1); 8437c478bd9Sstevel@tonic-gate return (eqep); 8447c478bd9Sstevel@tonic-gate } 8457c478bd9Sstevel@tonic-gate 8467c478bd9Sstevel@tonic-gate /* 8477c478bd9Sstevel@tonic-gate * Commit an errorq 
element (eqep) for dispatching. 8487c478bd9Sstevel@tonic-gate * This function may be called from any context subject 8497c478bd9Sstevel@tonic-gate * to the Platform Considerations described above. 8507c478bd9Sstevel@tonic-gate */ 8517c478bd9Sstevel@tonic-gate void 8527c478bd9Sstevel@tonic-gate errorq_commit(errorq_t *eqp, errorq_elem_t *eqep, uint_t flag) 8537c478bd9Sstevel@tonic-gate { 8547c478bd9Sstevel@tonic-gate errorq_elem_t *old; 8557c478bd9Sstevel@tonic-gate 8567c478bd9Sstevel@tonic-gate if (eqep == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) { 8577c478bd9Sstevel@tonic-gate atomic_add_64(&eqp->eq_kstat.eqk_commit_fail.value.ui64, 1); 8587c478bd9Sstevel@tonic-gate return; 8597c478bd9Sstevel@tonic-gate } 8607c478bd9Sstevel@tonic-gate 8617c478bd9Sstevel@tonic-gate for (;;) { 8627c478bd9Sstevel@tonic-gate old = eqp->eq_pend; 8637c478bd9Sstevel@tonic-gate eqep->eqe_prev = old; 8647c478bd9Sstevel@tonic-gate membar_producer(); 8657c478bd9Sstevel@tonic-gate 8667c478bd9Sstevel@tonic-gate if (casptr(&eqp->eq_pend, old, eqep) == old) 8677c478bd9Sstevel@tonic-gate break; 8687c478bd9Sstevel@tonic-gate } 8697c478bd9Sstevel@tonic-gate 8707c478bd9Sstevel@tonic-gate atomic_add_64(&eqp->eq_kstat.eqk_committed.value.ui64, 1); 8717c478bd9Sstevel@tonic-gate 8727c478bd9Sstevel@tonic-gate if (flag == ERRORQ_ASYNC && eqp->eq_id != NULL) 8737c478bd9Sstevel@tonic-gate ddi_trigger_softintr(eqp->eq_id); 8747c478bd9Sstevel@tonic-gate } 8757c478bd9Sstevel@tonic-gate 8767c478bd9Sstevel@tonic-gate /* 8777c478bd9Sstevel@tonic-gate * Cancel an errorq element reservation by returning the specified element 8787c478bd9Sstevel@tonic-gate * to the free list. Duplicate or invalid frees are not supported. 
8797c478bd9Sstevel@tonic-gate */ 8807c478bd9Sstevel@tonic-gate void 8817c478bd9Sstevel@tonic-gate errorq_cancel(errorq_t *eqp, errorq_elem_t *eqep) 8827c478bd9Sstevel@tonic-gate { 8837c478bd9Sstevel@tonic-gate errorq_elem_t *fep; 8847c478bd9Sstevel@tonic-gate 8857c478bd9Sstevel@tonic-gate if (eqep == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) 8867c478bd9Sstevel@tonic-gate return; 8877c478bd9Sstevel@tonic-gate 8887c478bd9Sstevel@tonic-gate for (;;) { 8897c478bd9Sstevel@tonic-gate fep = eqp->eq_free; 8907c478bd9Sstevel@tonic-gate eqep->eqe_prev = fep; 8917c478bd9Sstevel@tonic-gate membar_producer(); 8927c478bd9Sstevel@tonic-gate 8937c478bd9Sstevel@tonic-gate if (casptr(&eqp->eq_free, fep, eqep) == fep) 8947c478bd9Sstevel@tonic-gate break; 8957c478bd9Sstevel@tonic-gate } 8967c478bd9Sstevel@tonic-gate 8977c478bd9Sstevel@tonic-gate atomic_add_64(&eqp->eq_kstat.eqk_cancelled.value.ui64, 1); 8987c478bd9Sstevel@tonic-gate } 8997c478bd9Sstevel@tonic-gate 9007c478bd9Sstevel@tonic-gate /* 9017c478bd9Sstevel@tonic-gate * Write elements on the dump list of each nvlist errorq to the dump device. 9027c478bd9Sstevel@tonic-gate * Upon reboot, fmd(1M) will extract and replay them for diagnosis. 
 */
void
errorq_dump(void)
{
	errorq_elem_t *eep;
	errorq_t *eqp;

	if (ereport_dumpbuf == NULL)
		return; /* reboot or panic before errorq is even set up */

	for (eqp = errorq_list; eqp != NULL; eqp = eqp->eq_next) {
		if (!(eqp->eq_flags & ERRORQ_NVLIST) ||
		    !(eqp->eq_flags & ERRORQ_ACTIVE))
			continue; /* do not dump this queue on panic */

		/* Walk the dump list (oldest to newest) built during panic. */
		for (eep = eqp->eq_dump; eep != NULL; eep = eep->eqe_dump) {
			errorq_nvelem_t *eqnp = eep->eqe_data;
			size_t len = 0;
			erpt_dump_t ed;
			int err;

			(void) nvlist_size(eqnp->eqn_nvl,
			    &len, NV_ENCODE_NATIVE);

			/* Skip reports that are empty or exceed the buffer. */
			if (len > ereport_dumplen || len == 0) {
				cmn_err(CE_WARN, "%s: unable to save error "
				    "report %p due to size %lu\n",
				    eqp->eq_name, (void *)eep, len);
				continue;
			}

			if ((err = nvlist_pack(eqnp->eqn_nvl,
			    (char **)&ereport_dumpbuf, &ereport_dumplen,
			    NV_ENCODE_NATIVE, KM_NOSLEEP)) != 0) {
				cmn_err(CE_WARN, "%s: unable to save error "
				    "report %p due to pack error %d\n",
				    eqp->eq_name, (void *)eep, err);
				continue;
			}

			/*
			 * Build the erpt_dump_t header, checksumming the
			 * packed nvlist so fmd(1M) can validate it on replay.
			 */
			ed.ed_magic = ERPT_MAGIC;
			ed.ed_chksum = checksum32(ereport_dumpbuf, len);
			ed.ed_size = (uint32_t)len;
			ed.ed_pad = 0;
			ed.ed_hrt_nsec = 0;
			ed.ed_hrt_base = panic_hrtime;
			ed.ed_tod_base.sec = panic_hrestime.tv_sec;
			ed.ed_tod_base.nsec = panic_hrestime.tv_nsec;

			dumpvp_write(&ed, sizeof (ed));
			dumpvp_write(ereport_dumpbuf, len);
		}
	}
}

/*
 * Return the nvlist of a reserved element on an ERRORQ_NVLIST queue.
 */
nvlist_t *
errorq_elem_nvl(errorq_t *eqp, const errorq_elem_t *eqep)
{
	errorq_nvelem_t *eqnp = eqep->eqe_data;

	ASSERT(eqp->eq_flags & ERRORQ_ACTIVE && eqp->eq_flags & ERRORQ_NVLIST);

	return (eqnp->eqn_nvl);
}

/*
 * Return the nv_alloc handle of a reserved element on an ERRORQ_NVLIST queue.
 */
nv_alloc_t *
errorq_elem_nva(errorq_t *eqp, const errorq_elem_t *eqep)
{
	errorq_nvelem_t *eqnp = eqep->eqe_data;

	ASSERT(eqp->eq_flags & ERRORQ_ACTIVE && eqp->eq_flags & ERRORQ_NVLIST);

	return (eqnp->eqn_nva);
}

/* 9797c478bd9Sstevel@tonic-gate * Reserve a new element and duplicate the data of the original into it. 9807c478bd9Sstevel@tonic-gate */ 9817c478bd9Sstevel@tonic-gate void * 9827c478bd9Sstevel@tonic-gate errorq_elem_dup(errorq_t *eqp, const errorq_elem_t *eqep, errorq_elem_t **neqep) 9837c478bd9Sstevel@tonic-gate { 9847c478bd9Sstevel@tonic-gate ASSERT(eqp->eq_flags & ERRORQ_ACTIVE); 9857c478bd9Sstevel@tonic-gate ASSERT(!(eqp->eq_flags & ERRORQ_NVLIST)); 9867c478bd9Sstevel@tonic-gate 9877c478bd9Sstevel@tonic-gate if ((*neqep = errorq_reserve(eqp)) == NULL) 9887c478bd9Sstevel@tonic-gate return (NULL); 9897c478bd9Sstevel@tonic-gate 9907c478bd9Sstevel@tonic-gate bcopy(eqep->eqe_data, (*neqep)->eqe_data, eqp->eq_size); 9917c478bd9Sstevel@tonic-gate return ((*neqep)->eqe_data); 9927c478bd9Sstevel@tonic-gate } 993