/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <vm/xhat.h>
#include <vm/page.h>
#include <vm/as.h>

int xhat_debug = 0;

krwlock_t xhat_provider_rwlock;
xhat_provider_t *xhat_provider = NULL;

/* One-time initialization of the XHAT framework lock. */
void
xhat_init()
{
	rw_init(&xhat_provider_rwlock, NULL, RW_DEFAULT, NULL);
}


/*
 * Registers an XHAT provider and creates the kmem cache that will
 * manage its xhat blocks.
 */
int
xhat_provider_register(xhat_provider_t *provider)
{
	/* strlen("_cache") + 1 = 7 */
	char cache_name[XHAT_CACHE_NAMELEN + 7];

	if (provider->xhat_provider_version != XHAT_PROVIDER_VERSION) {
		cmn_err(CE_WARN, "XHAT provider version mismatch");
		return (-1);
	}

	if ((XHAT_POPS(provider)->xhat_alloc == NULL) ||
	    (XHAT_POPS(provider)->xhat_free == NULL)) {
		cmn_err(CE_WARN, "Malformed XHAT provider");
		return (-1);
	}

	/* Allocate kmem_cache which will manage xhat blocks */
	provider->xblkcache->free_blks = NULL;
	(void) strncpy(cache_name, provider->xhat_provider_name,
	    XHAT_CACHE_NAMELEN);
	/* strncpy() does not terminate the copy if the name is too long */
	cache_name[XHAT_CACHE_NAMELEN] = '\0';
	(void) strcat(cache_name, "_cache");
	provider->xblkcache->cache = kmem_cache_create(cache_name,
	    provider->xhat_provider_blk_size, 0, NULL, NULL,
	    provider->xblkcache->reclaim,
	    (void *)provider, NULL, 0);
	if (provider->xblkcache->cache == NULL) {
		cmn_err(CE_WARN, "Failed to allocate cache for %s",
		    provider->xhat_provider_name);
		return (-1);
	}

	mutex_init(&provider->xblkcache->lock, NULL, MUTEX_DEFAULT, NULL);

	/* Insert provider in the global list */
	rw_enter(&xhat_provider_rwlock, RW_WRITER);
	provider->next = xhat_provider;
	provider->prev = NULL;
	if (xhat_provider)
		xhat_provider->prev = provider;
	xhat_provider = provider;
	xhat_provider->xhat_provider_refcnt = 0;
	rw_exit(&xhat_provider_rwlock);
	return (0);
}


/* Unregisters an XHAT provider; fails if any XHATs still reference it. */
int
xhat_provider_unregister(xhat_provider_t *provider)
{
	if (provider->xhat_provider_version != XHAT_PROVIDER_VERSION)
		return (-1);

	rw_enter(&xhat_provider_rwlock, RW_WRITER);

	if (provider->xhat_provider_refcnt) {
		rw_exit(&xhat_provider_rwlock);
		return (-1);
	}

	if (provider->next)
		provider->next->prev = provider->prev;
	if (provider->prev)
		provider->prev->next = provider->next;
	else
		xhat_provider = provider->next;
	provider->prev = NULL;
	provider->next = NULL;
	rw_exit(&xhat_provider_rwlock);

	/* Free all xblks that are sitting on free_blks list */
	provider->xblkcache->reclaim(provider);

	kmem_cache_destroy(provider->xblkcache->cache);

	return (0);
}


/* Attaches an XHAT to the address space */
int
xhat_attach_xhat(xhat_provider_t *provider, struct as *as,
    struct xhat **xhatp, void *arg)
{
	struct xhat *xh;

	xh = XHAT_POPS(provider)->xhat_alloc(arg);
	if (xh == NULL) {
		*xhatp = NULL;
		return (XH_PRVDR);
	}

	mutex_init(&xh->xhat_lock, NULL, MUTEX_DEFAULT, NULL);
	xh->xhat_provider = provider;

	rw_enter(&xhat_provider_rwlock, RW_WRITER);
	provider->xhat_provider_refcnt++;
	rw_exit(&xhat_provider_rwlock);

	mutex_enter(&as->a_contents);

	/* Is address space busy (being freed, dup'd or swapped)? */
	if (AS_ISBUSY(as)) {
		mutex_exit(&as->a_contents);
		XHAT_POPS(provider)->xhat_free(xh);

		rw_enter(&xhat_provider_rwlock, RW_WRITER);
		provider->xhat_provider_refcnt--;
		rw_exit(&xhat_provider_rwlock);

		*xhatp = NULL;
		return (XH_ASBUSY);
	}

	xh->xhat_as = as;
	xh->xhat_refcnt = 0;
	xh->holder = NULL;
	xh->arg = arg;
	xh->prev = NULL;	/* new list head has no predecessor */
	xh->next = (struct xhat *)as->a_xhat;
	if (xh->next)
		xh->next->prev = xh;
	as->a_xhat = xh;
	mutex_exit(&as->a_contents);
	*xhatp = xh;
	return (0);
}


/* Detaches the provider's XHAT from the address space and frees it. */
int
xhat_detach_xhat(xhat_provider_t *provider, struct as *as)
{
	struct xhat *xh;

	mutex_enter(&as->a_contents);

	for (xh = (struct xhat *)as->a_xhat; xh != NULL; xh = xh->next)
		if (xh->xhat_provider == provider) {

			if (xh->holder != NULL) {
				/*
				 * The address space is being freed,
				 * dup'd or swapped out.
				 * If we are the thread which is doing one
				 * of those operations, we can go ahead
				 * and free up the XHAT.
				 * Otherwise, return.
				 */
				if (xh->holder != curthread) {
					mutex_exit(&as->a_contents);
					return (XH_ASBUSY);
				} else
					xhat_hat_rele(xh);
			}

			if (xh->xhat_refcnt > 0) {
				/*
				 * There are still "users" of the XHAT.
				 * This may be either because the caller
				 * forgot to free something up (which is a bug)
				 * or because xhat_op_all() is in progress.
				 * Since we are not allowing any of
				 * xhat_op_all's ops to call xhat_detach_xhat(),
				 * this can only be some other thread.  It
				 * may want to wait a bit and retry.
				 */

				/* Restore the hold on the XHAT */
				if (xh->holder == curthread)
					xhat_hat_hold(xh);

				mutex_exit(&as->a_contents);
				return (XH_XHHELD);
			}

			rw_enter(&xhat_provider_rwlock, RW_WRITER);
			provider->xhat_provider_refcnt--;
			rw_exit(&xhat_provider_rwlock);

			if (xh->next)
				xh->next->prev = xh->prev;
			if (xh->prev)
				xh->prev->next = xh->next;
			else
				as->a_xhat = (void *) xh->next;
			mutex_exit(&as->a_contents);

			XHAT_POPS(provider)->xhat_free(xh);

			return (0);
		}
	mutex_exit(&as->a_contents);
	return (XH_NOTATTCHD);
}

/* Takes a hold on the XHAT so it cannot be detached. */
void
xhat_hat_hold(struct xhat *xhat)
{
	mutex_enter(&xhat->xhat_lock);
	xhat->xhat_refcnt++;
	mutex_exit(&xhat->xhat_lock);
}

/* Releases a hold taken by xhat_hat_hold(). */
void
xhat_hat_rele(struct xhat *xhat)
{
	mutex_enter(&xhat->xhat_lock);
	xhat->xhat_refcnt--;
	ASSERT(xhat->xhat_refcnt >= 0);
	mutex_exit(&xhat->xhat_lock);
}


/* Returns the current number of holds on the XHAT. */
int
xhat_hat_holders(struct xhat *xhat)
{
	return (xhat->xhat_refcnt);
}


/*
 * Assumes that address space is already locked
 * and that AS_FREE is set for as->a_flags.
 */
void
xhat_free_start_all(struct as *as)
{
	struct xhat *xh, *xh_nxt;

	ASSERT(AS_ISBUSY(as));

	mutex_enter(&as->a_contents);
	xh = (struct xhat *)as->a_xhat;

	/*
	 * Simply calling xhat_hat_hold() won't work because we will
	 * not be able to succeed in xhat_detach_xhat(), which may
	 * get called from here.  We need to know _who_ the holder is.
	 */
	if (xh != NULL) {
		xhat_hat_hold(xh);
		ASSERT(xh->holder == NULL);
		xh->holder = curthread;
	}

	while (xh != NULL) {

		xh_nxt = xh->next;
		if (xh_nxt != NULL) {
			ASSERT(xh_nxt->holder == NULL);
			xhat_hat_hold(xh_nxt);
			xh_nxt->holder = curthread;
		}

		mutex_exit(&as->a_contents);

		XHAT_FREE_START(xh);

		mutex_enter(&as->a_contents);

		xh = xh_nxt;
	}

	mutex_exit(&as->a_contents);
}


/*
 * Assumes that address space is already locked.
 * Since xhat_free_start_all() must have been called
 * earlier, the holder is set to curthread for all XHATs.
 * Also, since AS_BUSY is set for as->a_flags, no new
 * XHATs could have been added.
 */
void
xhat_free_end_all(struct as *as)
{
	struct xhat *xh, *xh_nxt;

	ASSERT(AS_ISBUSY(as));

	mutex_enter(&as->a_contents);
	xh = (struct xhat *)as->a_xhat;

	while (xh != NULL) {

		ASSERT(xh->holder == curthread);

		xh_nxt = xh->next;

		mutex_exit(&as->a_contents);

		XHAT_FREE_END(xh);

		mutex_enter(&as->a_contents);

		xh = xh_nxt;
	}

	mutex_exit(&as->a_contents);
}


/* Assumes that address space is already locked */

/* ARGSUSED */
int
xhat_dup_all(struct as *as, struct as *newas, caddr_t addr, size_t len,
    uint_t flag)
{
	/* This is not supported. Should we return some sort of error? */

	ASSERT(AS_ISBUSY(as));

	return (0);
}


/* Assumes that address space is already locked */
void
xhat_swapout_all(struct as *as)
{
	struct xhat *xh, *xh_nxt;

	ASSERT(AS_ISBUSY(as));

	mutex_enter(&as->a_contents);
	xh = (struct xhat *)as->a_xhat;

	if (xh != NULL) {
		xhat_hat_hold(xh);
		ASSERT(xh->holder == NULL);
		xh->holder = curthread;
	}

	while (xh != NULL) {

		xh_nxt = xh->next;
		if (xh_nxt != NULL) {
			ASSERT(xh_nxt->holder == NULL);
			xhat_hat_hold(xh_nxt);
			xh_nxt->holder = curthread;
		}

		mutex_exit(&as->a_contents);

		XHAT_SWAPOUT(xh);

		mutex_enter(&as->a_contents);

		/*
		 * If the xh is still there (i.e. swapout did not
		 * destroy it), clear the holder field.
		 * xh_nxt->prev couldn't have been changed in
		 * xhat_attach_xhat() because AS_BUSY is set.
		 * xhat_detach_xhat() also couldn't have modified it
		 * because (holder != NULL).
		 * If there is only one XHAT, just see if a_xhat still
		 * points to us.
		 */
		if (((xh_nxt != NULL) && (xh_nxt->prev == xh)) ||
		    ((as->a_xhat != NULL) && (as->a_xhat == xh))) {
			xhat_hat_rele(xh);
			xh->holder = NULL;
		}

		xh = xh_nxt;
	}

	mutex_exit(&as->a_contents);
}


/*
 * In the following routines, the appropriate xhat_op
 * should never attempt to call xhat_detach_xhat(): it will
 * never succeed since the XHAT is held.
 */

#define	XHAT_UNLOAD_CALLBACK_OP	(0)
#define	XHAT_SETATTR_OP		(1)
#define	XHAT_CLRATTR_OP		(2)
#define	XHAT_CHGATTR_OP		(3)
#define	XHAT_CHGPROT_OP		(4)
#define	XHAT_UNSHARE_OP		(5)


static void
xhat_op_all(int op, struct as *as, caddr_t addr,
    size_t len, uint_t flags, void *ptr)
{
	struct xhat *xh, *xh_nxt;

	mutex_enter(&as->a_contents);
	xh = (struct xhat *)as->a_xhat;

	while (xh != NULL) {

		xhat_hat_hold(xh);

		xh_nxt = xh->next;
		if (xh_nxt != NULL)
			xhat_hat_hold(xh_nxt);

		mutex_exit(&as->a_contents);

		switch (op) {
		case XHAT_UNLOAD_CALLBACK_OP:
			XHAT_UNLOAD_CALLBACK(xh, addr,
			    len, flags, (hat_callback_t *)ptr);
			break;
		case XHAT_SETATTR_OP:
			XHAT_SETATTR(xh, addr, len, flags);
			break;
		case XHAT_CLRATTR_OP:
			XHAT_CLRATTR(xh, addr, len, flags);
			break;
		case XHAT_CHGATTR_OP:
			XHAT_CHGATTR(xh, addr, len, flags);
			break;
		case XHAT_CHGPROT_OP:
			XHAT_CHGPROT(xh, addr, len, flags);
			break;
		case XHAT_UNSHARE_OP:
			XHAT_UNSHARE(xh, addr, len);
			break;
		default:
			panic("Unknown op %d in xhat_op_all", op);
		}

		mutex_enter(&as->a_contents);

		/*
		 * Both pointers are still valid because both
		 * XHATs are held.
		 */
		xhat_hat_rele(xh);
		if (xh_nxt != NULL)
			xhat_hat_rele(xh_nxt);
		xh = xh_nxt;
	}

	mutex_exit(&as->a_contents);
}


void
xhat_unload_callback_all(struct as *as, caddr_t addr, size_t len, uint_t flags,
    hat_callback_t *callback)
{
	xhat_op_all(XHAT_UNLOAD_CALLBACK_OP, as, addr, len, flags, callback);
}


void
xhat_setattr_all(struct as *as, caddr_t addr, size_t len, uint_t attr)
{
	xhat_op_all(XHAT_SETATTR_OP, as, addr, len, attr, NULL);
}


void
xhat_clrattr_all(struct as *as, caddr_t addr, size_t len, uint_t attr)
{
	xhat_op_all(XHAT_CLRATTR_OP, as, addr, len, attr, NULL);
}


void
xhat_chgattr_all(struct as *as, caddr_t addr, size_t len, uint_t attr)
{
	xhat_op_all(XHAT_CHGATTR_OP, as, addr, len, attr, NULL);
}


void
xhat_chgprot_all(struct as *as, caddr_t addr, size_t len, uint_t prot)
{
	xhat_op_all(XHAT_CHGPROT_OP, as, addr, len, prot, NULL);
}


void
xhat_unshare_all(struct as *as, caddr_t addr, size_t len)
{
	xhat_op_all(XHAT_UNSHARE_OP, as, addr, len, 0, NULL);
}