/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
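
/*
 * A minimal usage sketch (hypothetical "foo" bus driver and illustrative
 * address values, not code from this file): a bus driver typically
 * declares an rman, initializes it, and hands it the address ranges it
 * decodes before any allocations are made:
 *
 *	static struct rman foo_mem_rman;
 *
 *	foo_mem_rman.rm_type = RMAN_ARRAY;
 *	foo_mem_rman.rm_descr = "foo bus memory addresses";
 *	if (rman_init(&foo_mem_rman) != 0 ||
 *	    rman_manage_region(&foo_mem_rman, 0xc0000000, 0xcfffffff) != 0)
 *		panic("foo: cannot set up rman");
 *
 * Allocations are then carved out of the managed regions with
 * rman_reserve_resource() and returned with rman_release_resource().
 */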

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	rman_res_t	r_start;	/* index of the first entry in this resource */
	rman_res_t	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	void	*r_irq_cookie;	/* interrupt cookie for this (interrupt) resource */
	device_t r_dev;		/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
	int	r_type;		/* optional type for this resource. */
};
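
/*
 * The rm_list of an rman holds one resource_i per region, kept sorted by
 * r_start; regions never overlap, and adjacent regions whose flags allow
 * it are coalesced.  For illustration (hypothetical values), a manager
 * covering 0x00-0xff with 0x40-0x7f allocated looks like:
 *
 *	[0x00-0x3f free] -> [0x40-0x7f RF_ALLOCATED] -> [0x80-0xff free]
 *
 * rman_manage_region() and int_rman_release_resource() below perform the
 * merging; rman_reserve_resource() performs the splitting.
 */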

static int rman_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RWTUN,
    &rman_debug, 0, "rman debug");

#define	DPRINTF(...)	do { if (rman_debug) printf(__VA_ARGS__); } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx;	/* mutex to protect rman_head */
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);

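/*
 * struct resource is the public handle type; struct resource_i above is
 * the internal representation.  Every resource_i points back to itself
 * through r_r.__r_i, which is how the accessor functions at the bottom
 * of this file map a caller's struct resource to the internal fields.
 */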
static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF("%s: <%s> request: start %#jx, end %#jx\n", __func__,
	    rm->rm_descr, start, end);
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ~0)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}
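
/*
 * For illustration (hypothetical values, not code from this file):
 * abutting managed ranges coalesce into one region, disjoint ranges stay
 * separate, and overlapping requests fail:
 *
 *	rman_manage_region(rm, 0x000, 0x0ff);	// new region
 *	rman_manage_region(rm, 0x100, 0x1ff);	// merged: [0x000, 0x1ff]
 *	rman_manage_region(rm, 0x400, 0x4ff);	// separate region
 *	rman_manage_region(rm, 0x080, 0x180);	// EBUSY: overlaps
 */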

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & RF_SHAREABLE)
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
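
/*
 * For illustration (hypothetical values): a driver holding [0x100, 0x1ff]
 * can grow upward with
 *
 *	error = rman_adjust_resource(res, 0x100, 0x2ff);
 *
 * which succeeds only if [0x200, 0x2ff] is adjacent free space in the
 * same rman.  Shrinking, e.g. rman_adjust_resource(res, 0x180, 0x1ff),
 * always succeeds and returns [0x100, 0x17f] to the free list.
 */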

#define	SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_PREFETCHABLE))

struct resource *
rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end,
    rman_res_t count, u_int flags, device_t dev)
{
	u_int new_rflags;
	struct resource_i *r, *s, *rv;
	rman_res_t rstart, rend, amask;

	rv = NULL;

	DPRINTF("%s: <%s> request: [%#jx, %#jx], length %#jx, flags %x, "
	    "device %s\n", __func__, rm->rm_descr, start, end, count, flags,
	    dev == NULL ? "<null>" : device_get_nameunit(dev));
	KASSERT(count != 0, ("%s: attempted to allocate an empty range",
	    __func__));
	KASSERT((flags & RF_FIRSTSHARE) == 0,
	    ("invalid flags %#x", flags));
	new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	r = TAILQ_FIRST(&rm->rm_list);
	if (r == NULL)
		DPRINTF("NULL list head\n");
	else
		DPRINTF("%s: trying %#jx <%#jx,%#jx>\n", __func__, r->r_end,
		    start, count - 1);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link))
		DPRINTF("%s: tried %#jx <%#jx,%#jx>\n", __func__, r->r_end,
		    start, count - 1);

	if (r == NULL) {
		DPRINTF("could not find a region\n");
		goto out;
	}

	amask = (1ull << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= RM_MAX_END - amask,
	    ("start (%#jx) + amask (%#jx) would wrap around", start, amask));

	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF("considering [%#jx, %#jx]\n", s->r_start, s->r_end);
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF("s->r_start (%#jx) + count - 1 > end (%#jx)\n",
			    s->r_start, end);
			break;
		}
		if (s->r_start > RM_MAX_END - amask) {
			DPRINTF("s->r_start (%#jx) + amask (%#jx) too large\n",
			    s->r_start, amask);
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF("region is allocated\n");
			continue;
		}
		rstart = ummax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied.  This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ummin(s->r_end, ummax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF("adjusted start exceeds end\n");
			continue;
		}
		DPRINTF("truncated region: [%#jx, %#jx]; size %#jx (requested %#jx)\n",
		    rstart, rend, (rend - rstart + 1), count);

		if ((rend - rstart) >= (count - 1)) {
			DPRINTF("candidate region: [%#jx, %#jx], size %#jx\n",
			    rstart, rend, (rend - rstart + 1));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF("candidate region is entire chunk\n");
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 * s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF("splitting region in three parts: "
				    "[%#jx, %#jx]; [%#jx, %#jx]; [%#jx, %#jx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end);
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF("allocating from the beginning\n");
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF("allocating at the end\n");
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF("no unshared regions found\n");
	if ((flags & RF_SHAREABLE) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}
	/*
	 * We couldn't find anything.
	 */

out:
	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}
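
/*
 * A usage sketch (hypothetical names and values): to carve an exclusive,
 * naturally-aligned 0x100-byte block out of anywhere in a managed range,
 * a bus driver might do:
 *
 *	struct resource *res;
 *
 *	res = rman_reserve_resource(&foo_mem_rman, 0, ~0, 0x100,
 *	    rman_make_alignment_flags(0x100), dev);
 *	if (res == NULL)
 *		return (ENXIO);
 *
 * Passing RF_SHAREABLE instead permits the same [start, end] range to be
 * handed to several clients, subject to the exact-match restriction in
 * the comment above.
 */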

int
rman_activate_resource(struct resource *re)
{
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	r->r_flags |= RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	r->__r_i->r_flags &= ~RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		r->r_flags &= ~RF_ACTIVE;

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	if (__predict_false(size == 0))
		return (0);
	return (RF_ALIGNMENT_LOG2(flsl(size - 1)));
}
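
/*
 * For example, rman_make_alignment_flags(0x1000) yields
 * RF_ALIGNMENT_LOG2(12) (4 KB alignment), while a non-power-of-two size
 * such as 0x1800 rounds up to RF_ALIGNMENT_LOG2(13) (8 KB), matching
 * ceil(log2(size)).
 */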

rman_res_t
rman_get_start(const struct resource *r)
{

	return (r->__r_i->r_start);
}

rman_res_t
rman_get_end(const struct resource *r)
{

	return (r->__r_i->r_end);
}

rman_res_t
rman_get_size(const struct resource *r)
{

	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(const struct resource *r)
{

	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{

	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(const struct resource *r)
{

	return (r->__r_i->r_virtual);
}

void
rman_set_irq_cookie(struct resource *r, void *c)
{

	r->__r_i->r_irq_cookie = c;
}

void *
rman_get_irq_cookie(const struct resource *r)
{

	return (r->__r_i->r_irq_cookie);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{

	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(const struct resource *r)
{

	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{

	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(const struct resource *r)
{

	return (r->r_bushandle);
}

void
rman_set_mapping(struct resource *r, struct resource_map *map)
{

	KASSERT(rman_get_size(r) == map->r_size,
	    ("rman_set_mapping: size mismatch"));
	rman_set_bustag(r, map->r_bustag);
	rman_set_bushandle(r, map->r_bushandle);
	rman_set_virtual(r, map->r_vaddr);
}

void
rman_get_mapping(const struct resource *r, struct resource_map *map)
{

	map->r_bustag = rman_get_bustag(r);
	map->r_bushandle = rman_get_bushandle(r);
	map->r_size = rman_get_size(r);
	map->r_vaddr = rman_get_virtual(r);
}

void
rman_set_rid(struct resource *r, int rid)
{

	r->__r_i->r_rid = rid;
}

int
rman_get_rid(const struct resource *r)
{

	return (r->__r_i->r_rid);
}

void
rman_set_type(struct resource *r, int type)
{
	r->__r_i->r_type = type;
}

int
rman_get_type(const struct resource *r)
{
	return (r->__r_i->r_type);
}

void
rman_set_device(struct resource *r, device_t dev)
{

	r->__r_i->r_dev = dev;
}

device_t
rman_get_device(const struct resource *r)
{

	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(const struct resource *r, const struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters; the index into the list of resource
 * managers, and the resource offset into the list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	int rman_idx, res_idx;
	struct rman *rm;
	struct resource_i *res;
	struct resource_i *sres;
	struct u_rman urm;
	struct u_resource ures;
	int error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager.
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		} else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD | CTLFLAG_MPSAFE,
    sysctl_rman,
    "kernel resource manager");
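
/*
 * A userland consumer (devinfo(8) works along these lines) appends three
 * indices to the hw.bus.rman OID: the bus generation count, the resource
 * manager index, and the resource index (-1 selects the manager header).
 * A minimal sketch, assuming the generation was read from hw.bus.info:
 *
 *	int mib[CTL_MAXNAME];
 *	size_t miblen = CTL_MAXNAME - 3;
 *	struct u_rman urm;
 *	size_t len = sizeof(urm);
 *
 *	sysctlnametomib("hw.bus.rman", mib, &miblen);
 *	mib[miblen + 0] = generation;
 *	mib[miblen + 1] = 0;		// first resource manager
 *	mib[miblen + 2] = -1;		// manager header, struct u_rman
 *	sysctl(mib, miblen + 3, &urm, &len, NULL, 0);
 */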

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%jx-0x%jx full range)\n",
	    rm, rm->rm_descr, (rman_res_t)rm->rm_start, (rman_res_t)rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%jx-0x%jx (RID=%d) ",
		    r->r_start, r->r_end, r->r_rid);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND_FLAGS(rmans, db_show_rmans, DB_CMD_MEMSAFE)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS_FLAGS(allrman, db_show_all_rman, DB_CMD_MEMSAFE);
#endif