/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#ifndef	_VM_HAT_H
#define	_VM_HAT_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <vm/faultcode.h>
#include <sys/kstat.h>
#include <sys/siginfo.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the machine independent interfaces to
 * the hardware address translation management routines.  Other
 * machine specific interfaces and structures are defined
 * in <vm/hat_xxx.h>.  The hat layer manages the address
 * translation hardware as a cache driven by calls from the
 * higher levels of the VM system.
 */

struct hat;
struct kpme;
struct memseg;

#include <vm/page.h>

/*
 * a callback used with hat_unload_callback()
 * start and end mark are set to a range of unloaded addresses
 * and the function is invoked with a pointer to this data structure
 */
typedef struct hat_callback {
	caddr_t	hcb_start_addr;		/* first address of the unloaded range */
	caddr_t	hcb_end_addr;		/* last address of the unloaded range */
	void	(*hcb_function)(struct hat_callback *);	/* invoked per range */
	void	*hcb_data;		/* opaque argument for the callback */
} hat_callback_t;

/* opaque handle returned by hat_join_region(); see region interfaces below */
typedef	void *hat_region_cookie_t;

#ifdef	_KERNEL

/*
 * One time hat initialization
 */
void	hat_init(void);

/*
 * Notify hat of a system dump
 */
void	hat_dump(void);

/*
 * Operations on an address space:
 *
 * struct hat *hat_alloc(as)
 *	allocates a hat structure for as.
 *
 * void hat_free_start(hat)
 *	informs hat layer process has finished executing but as has not
 *	been cleaned up yet.
 *
 * void hat_free_end(hat)
 *	informs hat layer as is being destroyed.  hat layer cannot use as
 *	pointer after this call.
 *
 * void hat_swapin(hat)
 *	allocate any hat resources required for process being swapped in.
 *
 * void hat_swapout(hat)
 *	deallocate hat resources for process being swapped out.
 *
 * size_t hat_get_mapped_size(hat)
 *	returns number of bytes that have valid mappings in hat.
 *
 * void hat_stats_enable(hat)
 * void hat_stats_disable(hat)
 *	enables/disables collection of stats for hat.
 *
 * int hat_dup(parenthat, childhat, addr, len, flags)
 *	Duplicate address translations of the parent to the child.  Supports
 *	the entire address range or a range depending on flag,
 *	zero returned on success, non-zero on error
 *
 * void hat_thread_exit(thread)
 *	Notifies the HAT that a thread is exiting, called after it has been
 *	reassigned to the kernel AS.
 */

struct hat *hat_alloc(struct as *);
void	hat_free_start(struct hat *);
void	hat_free_end(struct hat *);
int	hat_dup(struct hat *, struct hat *, caddr_t, size_t, uint_t);
void	hat_swapin(struct hat *);
void	hat_swapout(struct hat *);
size_t	hat_get_mapped_size(struct hat *);
int	hat_stats_enable(struct hat *);
void	hat_stats_disable(struct hat *);
void	hat_thread_exit(kthread_t *);

/*
 * Operations on a named address within a segment:
 *
 * void hat_memload(hat, addr, pp, attr, flags)
 *	load/lock the given page struct
 *
 * void hat_memload_array(hat, addr, len, ppa, attr, flags)
 *	load/lock the given array of page structs
 *
 * void hat_devload(hat, addr, len, pf, attr, flags)
 *	load/lock the given page frame number
 *
 * void hat_unlock(hat, addr, len)
 *	unlock a given range of addresses
 *
 * void hat_unload(hat, addr, len, flags)
 * void hat_unload_callback(hat, addr, len, flags, callback)
 *	unload a given range of addresses (has optional callback)
 *
 * void hat_sync(hat, addr, len, flags)
 *	synchronize mapping with software data structures
 *
 * void	hat_map(hat, addr, len, flags)
 *
 * void hat_setattr(hat, addr, len, attr)
 * void hat_clrattr(hat, addr, len, attr)
 * void hat_chgattr(hat, addr, len, attr)
 *	modify attributes for a range of addresses.  skips any invalid mappings
 *
 * uint_t hat_getattr(hat, addr, *attr)
 *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
 *	mapping and *attr is valid, nonzero if there was no mapping and
 *	*attr is not valid.
 *
 * size_t hat_getpagesize(hat, addr)
 *	returns pagesize in bytes for <hat, addr>.  returns -1 if there is
 *	no mapping.  This is an advisory call.
 *
 * pfn_t hat_getpfnum(hat, addr)
 *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
 *
 * pfn_t hat_getkpfnum(addr)
 *	returns pfn for non-memory mapped addr in kernel address space
 *	or PFN_INVALID if mapping is invalid or is kernel memory.
 *
 * int hat_probe(hat, addr)
 *	return 0 if no valid mapping is present.  Faster version
 *	of hat_getattr in certain architectures.
 *
 * int hat_share(dhat, daddr, shat, saddr, len, szc)
 *
 * void hat_unshare(hat, addr, len, szc)
 *
 * void hat_chgprot(hat, addr, len, vprot)
 *	This is a deprecated call.  New segment drivers should store
 *	all attributes and use hat_*attr calls.
 *	Change the protections in the virtual address range
 *	given to the specified virtual protection.  If vprot is ~PROT_WRITE,
 *	then remove write permission, leaving the other permissions
 *	unchanged.  If vprot is ~PROT_USER, remove user permissions.
 */

void	hat_memload(struct hat *, caddr_t, struct page *, uint_t, uint_t);
void	hat_memload_array(struct hat *, caddr_t, size_t, struct page **,
	    uint_t, uint_t);
void	hat_memload_region(struct hat *, caddr_t, struct page *, uint_t,
	    uint_t, hat_region_cookie_t);
void	hat_memload_array_region(struct hat *, caddr_t, size_t, struct page **,
	    uint_t, uint_t, hat_region_cookie_t);

void	hat_devload(struct hat *, caddr_t, size_t, pfn_t, uint_t, int);

void	hat_unlock(struct hat *, caddr_t, size_t);
void	hat_unlock_region(struct hat *, caddr_t, size_t, hat_region_cookie_t);

void	hat_unload(struct hat *, caddr_t, size_t, uint_t);
void	hat_unload_callback(struct hat *, caddr_t, size_t, uint_t,
	    hat_callback_t *);
void	hat_sync(struct hat *, caddr_t, size_t, uint_t);
void	hat_map(struct hat *, caddr_t, size_t, uint_t);
void	hat_setattr(struct hat *, caddr_t, size_t, uint_t);
void	hat_clrattr(struct hat *, caddr_t, size_t, uint_t);
void	hat_chgattr(struct hat *, caddr_t, size_t, uint_t);
uint_t	hat_getattr(struct hat *, caddr_t, uint_t *);
ssize_t	hat_getpagesize(struct hat *, caddr_t);
pfn_t	hat_getpfnum(struct hat *, caddr_t);
int	hat_probe(struct hat *, caddr_t);
int	hat_share(struct hat *, caddr_t, struct hat *, caddr_t, size_t, uint_t);
void	hat_unshare(struct hat *, caddr_t, size_t, uint_t);
void	hat_chgprot(struct hat *, caddr_t, size_t, uint_t);
void	hat_reserve(struct as *, caddr_t, size_t);
pfn_t	va_to_pfn(void *);
uint64_t va_to_pa(void *);

/*
 * hat_getkpfnum() is never supported on amd64 and will be
 * removed from other platforms in future release
 */
#if !defined(__amd64)
pfn_t	hat_getkpfnum(caddr_t);
#endif


/*
 * Kernel Physical Mapping (segkpm) hat interface routines.
 */
caddr_t	hat_kpm_mapin(struct page *, struct kpme *);
void	hat_kpm_mapout(struct page *, struct kpme *, caddr_t);
caddr_t	hat_kpm_page2va(struct page *, int);
struct page *hat_kpm_vaddr2page(caddr_t);
int	hat_kpm_fault(struct hat *, caddr_t);
void	hat_kpm_mseghash_clear(int);
void	hat_kpm_mseghash_update(pgcnt_t, struct memseg *);
void	hat_kpm_addmem_mseg_update(struct memseg *, pgcnt_t, offset_t);
void	hat_kpm_addmem_mseg_insert(struct memseg *);
void	hat_kpm_addmem_memsegs_update(struct memseg *);
caddr_t	hat_kpm_mseg_reuse(struct memseg *);
void	hat_kpm_delmem_mseg_update(struct memseg *, struct memseg **);
void	hat_kpm_split_mseg_update(struct memseg *, struct memseg **,
	    struct memseg *, struct memseg *, struct memseg *);
void	hat_kpm_walk(void (*)(void *, void *, size_t), void *);

/*
 * Operations on all translations for a given page(s)
 *
 * void hat_page_setattr(pp, flag)
 * void hat_page_clrattr(pp, flag)
 *	used to set/clr red/mod bits.
 *
 * uint hat_page_getattr(pp, flag)
 *	If flag is specified, returns 0 if attribute is disabled
 *	and non zero if enabled.  If flag specifies multiple attributes
 *	then returns 0 if ALL attributes are disabled.  This is an advisory
 *	call.
 *
 * int hat_pageunload(pp, forceflag)
 *	unload all translations attached to pp.
 *
 * uint_t hat_pagesync(pp, flags)
 *	get hw stats from hardware into page struct and reset hw stats
 *	returns attributes of page
 *
 * ulong_t hat_page_getshare(pp)
 *	returns approx number of mappings to this pp.  A return of 0 implies
 *	there are no mappings to the page.
 *
 * faultcode_t hat_softlock(hat, addr, lenp, ppp, flags);
 *	called to softlock pages for zero copy tcp
 *
 * void hat_page_demote(pp);
 *	unload all large mappings to pp and decrease p_szc of all
 *	constituent pages according to the remaining mappings.
 */

void	hat_page_setattr(struct page *, uint_t);
void	hat_page_clrattr(struct page *, uint_t);
uint_t	hat_page_getattr(struct page *, uint_t);
int	hat_pageunload(struct page *, uint_t);
uint_t	hat_pagesync(struct page *, uint_t);
ulong_t	hat_page_getshare(struct page *);
int	hat_page_checkshare(struct page *, ulong_t);
faultcode_t hat_softlock(struct hat *, caddr_t, size_t *,
	    struct page **, uint_t);
void	hat_page_demote(struct page *);

/*
 * Routine to expose supported HAT features to PIM.
 */
enum hat_features {
	HAT_SHARED_PT,		/* Shared page tables */
	HAT_DYNAMIC_ISM_UNMAP,	/* hat_pageunload() handles ISM pages */
	HAT_VMODSORT,		/* support for VMODSORT flag of vnode */
	HAT_SHARED_REGIONS	/* shared regions support */
};

int	hat_supported(enum hat_features, void *);

/*
 * Services provided to the hat:
 *
 * void as_signal_proc(as, siginfo)
 *	deliver signal to all processes that have this as.
 *
 * void hat_setstat(as, addr, len, rmbits)
 *	informs hatstat layer that ref/mod bits need to be updated for
 *	address range.
 */
void	as_signal_proc(struct as *, k_siginfo_t *siginfo);
void	hat_setstat(struct as *, caddr_t, size_t, uint_t);

/*
 * Flags to pass to hat routines.
 *
 * Certain flags only apply to some interfaces:
 *
 *	HAT_LOAD	Default flags to load a translation to the page.
 *	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
 *			and hat_devload().
 *	HAT_LOAD_ADV	Advisory load - Load translation if and only if
 *			sufficient MMU resources exist (i.e., do not steal).
 *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
 *			that map some user pages (not kas) is shared by more
 *			than one process (eg. ISM).
 *	HAT_LOAD_CONTIG	Pages are contiguous
 *	HAT_LOAD_NOCONSIST	Do not add mapping to mapping list.
 *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
 *	HAT_RELOAD_SHARE	Reload a shared page table entry.  Some platforms
 *			may require different actions than on the first
 *			load of a shared mapping.
 *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
 *			point, it's setting up mapping to allocate internal
 *			hat layer data structures.  This flag forces hat layer
 *			to tap its reserves in order to prevent infinite
 *			recursion.
 *	HAT_LOAD_TEXT	A flag to hat_memload() to indicate loading text pages.
 */

/*
 * Flags for hat_memload/hat_devload
 */
#define	HAT_FLAGS_RESV		0xFF000000	/* resv for hat impl */
#define	HAT_LOAD		0x00
#define	HAT_LOAD_LOCK		0x01
#define	HAT_LOAD_ADV		0x04
#define	HAT_LOAD_CONTIG		0x10
#define	HAT_LOAD_NOCONSIST	0x20
#define	HAT_LOAD_SHARE		0x40
#define	HAT_LOAD_REMAP		0x80
#define	HAT_RELOAD_SHARE	0x100
#define	HAT_NO_KALLOC		0x200
#define	HAT_LOAD_TEXT		0x400

/*
 * Flags for initializing disable_*large_pages.
 *
 *	HAT_AUTO_TEXT	Get MMU specific disable_auto_text_large_pages
 *	HAT_AUTO_DATA	Get MMU specific disable_auto_data_large_pages
 */
#define	HAT_AUTO_TEXT		0x800
#define	HAT_AUTO_DATA		0x1000

/*
 * Attributes for hat_memload/hat_devload/hat_*attr
 * are a superset of prot flags defined in mman.h.
 */
#define	HAT_PLAT_ATTR_MASK	0xF00000
#define	HAT_PROT_MASK		0x0F

#define	HAT_NOFAULT		0x10
#define	HAT_NOSYNC		0x20

/*
 * Advisory ordering attributes.  Apply only to device mappings.
 *
 * HAT_STRICTORDER: the CPU must issue the references in order, as the
 *	programmer specified.  This is the default.
 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
 *	of reordering; store or load with store or load).
 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
 *	to consecutive locations (for example, turn two consecutive byte
 *	stores into one halfword store), and it may batch individual loads
 *	(for example, turn two consecutive byte loads into one halfword load).
 *	This also implies re-ordering.
 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
 *	until another store occurs.  The default is to fetch new data
 *	on every load.  This also implies merging.
 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
 *	the device (perhaps with other data) at a later time.  The default is
 *	to push the data right away.  This also implies load caching.
 */
#define	HAT_STRICTORDER		0x0000
#define	HAT_UNORDERED_OK	0x0100
#define	HAT_MERGING_OK		0x0200
#define	HAT_LOADCACHING_OK	0x0300
#define	HAT_STORECACHING_OK	0x0400
#define	HAT_ORDER_MASK		0x0700

/* endian attributes */
#define	HAT_NEVERSWAP		0x0000
#define	HAT_STRUCTURE_BE	0x1000
#define	HAT_STRUCTURE_LE	0x2000
#define	HAT_ENDIAN_MASK		0x3000

/* flags for hat_softlock */
#define	HAT_COW			0x0001

/*
 * Flags for hat_unload
 */
#define	HAT_UNLOAD		0x00
#define	HAT_UNLOAD_NOSYNC	0x02
#define	HAT_UNLOAD_UNLOCK	0x04
#define	HAT_UNLOAD_OTHER	0x08
#define	HAT_UNLOAD_UNMAP	0x10

/*
 * Flags for hat_pagesync, hat_getstat, hat_sync
 */
#define	HAT_SYNC_DONTZERO	0x00
#define	HAT_SYNC_ZERORM		0x01
/* Additional flags for hat_pagesync */
#define	HAT_SYNC_STOPON_REF	0x02
#define	HAT_SYNC_STOPON_MOD	0x04
#define	HAT_SYNC_STOPON_RM	(HAT_SYNC_STOPON_REF | HAT_SYNC_STOPON_MOD)
#define	HAT_SYNC_STOPON_SHARED	0x08

/*
 * Flags for hat_dup
 *
 * HAT_DUP_ALL	dup entire address space
 * HAT_DUP_COW	dup plus hat_clrattr(..PROT_WRITE) on newas
 */
#define	HAT_DUP_ALL		1
#define	HAT_DUP_COW		2
#define	HAT_DUP_SRD		3


/*
 * Flags for hat_map
 */
#define	HAT_MAP			0x00

/*
 * Flag for hat_pageunload
 */
#define	HAT_ADV_PGUNLOAD	0x00
#define	HAT_FORCE_PGUNLOAD	0x01

/*
 * Attributes for hat_page_*attr, hat_setstats and
 * returned by hat_pagesync.
 */
#define	P_MOD	0x1		/* the modified bit */
#define	P_REF	0x2		/* the referenced bit */
#define	P_RO	0x4		/* Read only page */
#define	P_NSH	0x8		/* Not to shuffle v_pages */

#define	hat_ismod(pp)		(hat_page_getattr(pp, P_MOD))
#define	hat_isref(pp)		(hat_page_getattr(pp, P_REF))
#define	hat_isro(pp)		(hat_page_getattr(pp, P_RO))

#define	hat_setmod(pp)		(hat_page_setattr(pp, P_MOD))
#define	hat_setmod_only(pp)	(hat_page_setattr(pp, P_MOD|P_NSH))
#define	hat_setref(pp)		(hat_page_setattr(pp, P_REF))
#define	hat_setrefmod(pp)	(hat_page_setattr(pp, P_REF|P_MOD))

#define	hat_clrmod(pp)		(hat_page_clrattr(pp, P_MOD))
#define	hat_clrref(pp)		(hat_page_clrattr(pp, P_REF))
#define	hat_clrrefmod(pp)	(hat_page_clrattr(pp, P_REF|P_MOD))

#define	hat_page_is_mapped(pp)	(hat_page_getshare(pp))

/*
 * hat_setup is being used in sparc/os/sundep.c
 */
void	hat_setup(struct hat *, int);

/*
 * Flags for hat_setup
 */
#define	HAT_DONTALLOC		0
#define	HAT_ALLOC		1
#define	HAT_INIT		2

/*
 * Other routines, for statistics
 */
int	hat_startstat(struct as *);
void	hat_getstat(struct as *, caddr_t, size_t, uint_t, char *, int);
void	hat_freestat(struct as *, int);
void	hat_resvstat(size_t, struct as *, caddr_t);

/*
 * Transitionary routine while we still allow hat_getkpfnum(caddr_t)
 * to return a pfn for kernel memory, but want to warn the user that
 * it isn't supported.
 */
void	hat_getkpfnum_badcall(void *caller);

/*
 * Relocation callback routines.  Currently only sfmmu HAT supports
 * these.
 */
extern int	hat_add_callback(id_t, caddr_t, uint_t, uint_t, void *,
	pfn_t *, void **);
extern id_t	hat_register_callback(int,
	int (*prehandler)(caddr_t, uint_t, uint_t, void *),
	int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
	int (*errhandler)(caddr_t, uint_t, uint_t, void *), int);
extern void	hat_delete_callback(caddr_t, uint_t, void *, uint_t, void *);

/*
 * hat_add_callback()/hat_delete_callback() flags.
 */
#define	HAC_NOSLEEP	0x0
#define	HAC_SLEEP	0x1
#define	HAC_PAGELOCK	0x2

/*
 * Suspend/unsuspend handler callback arguments.
 *
 * NOTE(review): HAT_SUSPEND/HAT_UNSUSPEND share the value 0x0010, and
 * HAT_PRESUSPEND/HAT_POSTUNSUSPEND share 0x0020.  This appears deliberate
 * (presumably the callback invoked — prehandler vs. posthandler —
 * disambiguates direction); confirm before assigning distinct values.
 */
#define	HAT_SUSPEND		0x0010
#define	HAT_UNSUSPEND		0x0010
#define	HAT_PRESUSPEND		0x0020
#define	HAT_POSTUNSUSPEND	0x0020

/*
 * Error handler callback arguments.  See the block comments
 * before the implementation of hat_add_callback() for an
 * explanation of what these mean.
 */
#define	HAT_CB_ERR_LEAKED	0x1

#endif	/* _KERNEL */

/*
 * The size of the bit array for ref and mod bit storage must be a power of 2.
 * 2 bits are collected for each page.  Below the power used is 4,
 * which is 16 8-bit characters = 128 bits, ref and mod bit information
 * for 64 pages.
 */
#define	HRM_SHIFT		4
#define	HRM_BYTES		(1 << HRM_SHIFT)
#define	HRM_PAGES		((HRM_BYTES * NBBY) / 2)
#define	HRM_PGPERBYTE		(NBBY/2)
#define	HRM_PGBYTEMASK		(HRM_PGPERBYTE-1)

#define	HRM_PGOFFMASK		((HRM_PGPERBYTE-1) << MMU_PAGESHIFT)
#define	HRM_BASEOFFSET		(((MMU_PAGESIZE * HRM_PAGES) - 1))
#define	HRM_BASEMASK		(~(HRM_BASEOFFSET))

#define	HRM_BASESHIFT		(MMU_PAGESHIFT + (HRM_SHIFT + 2))
#define	HRM_PAGEMASK		(MMU_PAGEMASK ^ HRM_BASEMASK)

#define	HRM_HASHSIZE		0x200
#define	HRM_HASHMASK		(HRM_HASHSIZE - 1)

#define	HRM_BLIST_INCR		0x200

/*
 * The structure for maintaining referenced and modified information
 */
struct hrmstat {
	struct as	*hrm_as;	/* stat block belongs to this as */
	uintptr_t	hrm_base;	/* base of block */
	ushort_t	hrm_id;		/* opaque identifier, one of a_vbits */
	struct hrmstat	*hrm_anext;	/* as statistics block list */
	struct hrmstat	*hrm_hnext;	/* list for hashed blocks */
	uchar_t		hrm_bits[HRM_BYTES];	/* the ref and mod bits */
};

extern struct hrmstat **hrm_hashtab;

/*
 * For global monitoring of the reference and modified bits
 * of all address spaces we reserve one id bit.
 */
#define	HRM_SWSMONID	1


#ifdef	_KERNEL

/*
 * Hat locking functions
 * XXX - these two functions are currently being used by hatstats
 * they can be removed by using a per-as mutex for hatstats.
 */
void	hat_enter(struct hat *);
void	hat_exit(struct hat *);

typedef void (*hat_rgn_cb_func_t)(caddr_t, caddr_t, caddr_t,
    size_t, void *, u_offset_t);

void	hat_join_srd(struct hat *, vnode_t *);

hat_region_cookie_t hat_join_region(struct hat *, caddr_t, size_t, void *,
    u_offset_t, uchar_t, uchar_t, hat_rgn_cb_func_t,
    uint_t);
void	hat_leave_region(struct hat *, hat_region_cookie_t,
    uint_t);
void	hat_dup_region(struct hat *, hat_region_cookie_t);

#define	HAT_INVALID_REGION_COOKIE	((hat_region_cookie_t)-1)
#define	HAT_IS_REGION_COOKIE_VALID(c)	((c) != HAT_INVALID_REGION_COOKIE)

/* hat_join_region() flags */

#define	HAT_REGION_TEXT	0x1	/* passed by segvn */
#define	HAT_REGION_ISM	0x2	/* for hat_share()/hat_unshare() */

#define	HAT_REGION_TYPE_MASK	(0x7)

#endif	/* _KERNEL */

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_HAT_H */