/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*          Copyright (c) 1985-2012 AT&T Intellectual Property          *
*                      and is licensed under the                       *
*                 Eclipse Public License, Version 1.0                  *
*                    by AT&T Intellectual Property                     *
*                                                                      *
*                A copy of the License is available at                 *
*          http://www.eclipse.org/org/documents/epl-v10.html           *
*         (with md5 checksum b35adb5213ca9657e911e9befb180842)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#if defined(_UWIN) && defined(_BLD_ast)

void _STUB_vmmapopen(){}

#else

#include	"vmhdr.h"
#include	<sys/types.h>
#include	<string.h>
#if _hdr_unistd
#include	<unistd.h>
#endif

#undef	ALIGN	/* some sys/param.h headers define this	*/

#include	<sys/mman.h>	/* mmap() headers	*/
#include	<sys/file.h>
#include	<sys/stat.h>
#include	<fcntl.h>

#include	<sys/shm.h>	/* shm headers		*/
#include	<sys/ipc.h>

#undef	ALIGN
#define ALIGN	sizeof(struct _align_s)

/* Create a region for allocation based on mmap() or shmget().
** Both provide shared memory allocation.
** mmap() also allows for allocating persistent data.
**
** Written by Kiem-Phong Vo (kpv@research.att.com)
*/
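/* Usage sketch (illustrative only, not compiled here; the path and sizes are
** made up, and the declarations are assumed to come from <vmalloc.h>):
**
**	Vmalloc_t	*vm;
**	Void_t		*blk;
**
**	if(!(vm = vmmopen("/tmp/app.map", -1, 1024*1024)) )	// proj < 0: mmap()-backed
**		return -1;
**	blk = vmalloc(vm, 4000);	// allocate from the shared/persistent region
**	...
**	vmmrelease(vm, 0);		// unmap at close but keep the backing store
**	vmclose(vm);
*/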
#define MM_INIT		001	/* initialization mode		*/

#define MM_RELEASE	010	/* release shared mem		*/
#define MM_CLEANUP	020	/* clean up resources		*/

/* magic word signaling region is being initialized */
#define MM_LETMEDOIT	((unsigned int)(('N'<<24) | ('B'<<16) | ('&'<<8) | ('I')) )

/* magic word signaling file/segment is ready */
#define MM_MAGIC	((unsigned int)(('P'<<24) | ('&'<<16) | ('N'<<8) | ('8')) )

/* default minimum region size */
#define MM_MINSIZE	(64*_Vmpagesize)

/* macros to get the data section and size */
#define MMHEAD(file)	ROUND(sizeof(Mmvm_t)+strlen(file), ALIGN)
#define MMDATA(mmvm)	((Vmuchar_t*)(mmvm)->base + MMHEAD(mmvm->file))
#define MMSIZE(mmvm)	((mmvm)->size - MMHEAD(mmvm->file))

#ifdef S_IRUSR
#define FILE_MODE	(S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)
#else
#define FILE_MODE	0644
#endif

/* to store key/value pairs for application */
typedef struct _mmuser_s	Mmuser_t;
struct _mmuser_s
{	Mmuser_t*	next;	/* linked list			*/
	int		key;	/* identifying key		*/
	Void_t*		val;	/* associated value		*/
};

typedef struct _mmvm_s
{	unsigned int	magic;	/* magic bytes			*/
	Void_t*		base;	/* address to map to		*/
	ssize_t		size;	/* total data size		*/
	ssize_t		busy;	/* amount in use		*/
	Mmuser_t*	user;	/* stored (key,val)'s		*/
	int		proj;	/* project number		*/
	char		file[1];/* file name			*/
} Mmvm_t;

typedef struct _mmdisc_s
{	Vmdisc_t	disc;	/* Vmalloc discipline		*/
	int		flag;	/* various modes		*/
	Mmvm_t*		mmvm;	/* shared memory data		*/
	ssize_t		size;	/* desired file size		*/
	int		shmid;	/* ID of the shared mem		*/
	int		proj;	/* shm project ID		*/
	char		file[1];/* backing store/ftok()		*/
} Mmdisc_t;

#if DEBUG
#include	<stdio.h>
#include	<string.h>
int _vmmdump(Vmalloc_t* vm, int fd)
{
	char		mesg[1024];
	Mmdisc_t	*mmdc = (Mmdisc_t*)vm->disc;

	fd = fd < 0 ? 2 : fd;
	sprintf(mesg, "File: %s\n", mmdc->file ); write(fd, mesg, strlen(mesg));
	sprintf(mesg, "Project: %10d\n", mmdc->proj); write(fd, mesg, strlen(mesg));
	sprintf(mesg, "Memory: %#010lx\n", mmdc->mmvm); write(fd, mesg, strlen(mesg));
	sprintf(mesg, "Size: %10d\n", mmdc->size); write(fd, mesg, strlen(mesg));
	sprintf(mesg, "Shmid: %10d\n", mmdc->shmid); write(fd, mesg, strlen(mesg));

	sprintf(mesg, "File header:\n"); write(fd, mesg, strlen(mesg));
	sprintf(mesg, "Magic: %10d\n", mmdc->mmvm->magic); write(fd, mesg, strlen(mesg));
	sprintf(mesg, "Base: %#010lx\n", mmdc->mmvm->base); write(fd, mesg, strlen(mesg));
	sprintf(mesg, "Size: %10d\n", mmdc->mmvm->size); write(fd, mesg, strlen(mesg));
	sprintf(mesg, "Busy: %10d\n", mmdc->mmvm->busy); write(fd, mesg, strlen(mesg));
	return 0;
}
#endif /*DEBUG*/

/* fix the mapped address for a region */
static Mmvm_t* mmfix(Mmvm_t* mmvm, Mmdisc_t* mmdc, int fd)
{
	Void_t	*base = mmvm->base;
	ssize_t	size = mmvm->size;

	if(base != (Void_t*)mmvm) /* mmvm is not right yet */
	{	/**/ASSERT(!base || (base && (VLONG(base)%_Vmpagesize) == 0) );
		if(mmdc->proj < 0)
		{	munmap((Void_t*)mmvm, size);
			mmvm = (Mmvm_t*)mmap(base, size, (PROT_READ|PROT_WRITE),
					     (MAP_FIXED|MAP_SHARED), fd, (off_t)0 );
		}
		else
		{	shmdt((Void_t*)mmvm);
			mmvm = (Mmvm_t*)shmat(mmdc->shmid, base, 0);
		}
		if(!mmvm || mmvm == (Mmvm_t*)(-1) )
			mmvm = NIL(Mmvm_t*);
	}

	return mmvm;
}
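/* Several processes may open the same backing file or shm segment at once, so
** region construction below is deliberately racy: the first process to swing
** mmvm->magic from 0 to MM_LETMEDOIT (via asocasint()) wins the right to
** initialize the header; every other process spins until the winner's vmopen()
** completes and flips the magic word to MM_MAGIC in the VM_ENDOPEN event
** (see mmexcept()). In outline, with error handling elided:
**
**	if(asocasint(&mmvm->magic, 0, MM_LETMEDOIT) == 0)
**		initialize header; MM_MAGIC is set later in mmexcept()
**	else	spin until magic == MM_MAGIC, then verify file and proj
*/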
/* initialize region data */
static int mminit(Mmdisc_t* mmdc)
{
	Void_t		*base;
	int		try, k;
	int		fd = -1;
	key_t		key = -1;
	ssize_t		extent, size = 0;
	Mmvm_t		*mmvm = NIL(Mmvm_t*);
	int		rv = -1;

	if(mmdc->mmvm) /* already done this */
		return 0;

	/* fixed size region so make it reasonably large */
	if((size = mmdc->size) < MM_MINSIZE )
		size = MM_MINSIZE;
	size += MMHEAD(mmdc->file) + ALIGN;
	size = ROUND(size, _Vmpagesize);

	/* this op can happen simultaneously in different processes */
	if((fd = open(mmdc->file, O_RDWR|O_CREAT, FILE_MODE)) < 0)
		return -1;

	/* get/create the initial segment of data */
	if(mmdc->proj < 0 ) /* proj < 0 means doing mmap() */
	{	/* Note that the location being written to is always zero! */
		if((extent = (ssize_t)lseek(fd, (off_t)0, SEEK_END)) < 0)
			goto done;
		if(extent < size) /* make the file size large enough */
			if(lseek(fd, (off_t)size, 0) != (off_t)size || write(fd, "", 1) != 1 )
				goto done;

		/* map the file into memory */
		mmvm = (Mmvm_t*)mmap(NIL(Void_t*), size, (PROT_READ|PROT_WRITE),
				     MAP_SHARED, fd, (off_t)0 );
	}
	else
	{	/* make the key and get/create an id for the shared mem segment */
		if((key = ftok(mmdc->file, mmdc->proj)) < 0 )
			goto done;
		if((mmdc->shmid = shmget(key, size, IPC_CREAT|FILE_MODE)) < 0 )
			goto done;

		/* map the data segment into memory */
		mmvm = (Mmvm_t*)shmat(mmdc->shmid, NIL(Void_t*), 0);
	}

	if(!mmvm || mmvm == (Mmvm_t*)(-1) ) /* initial mapping failed */
		goto done;

	/* all processes compete for the chore to initialize data */
	if(asocasint(&mmvm->magic, 0, MM_LETMEDOIT) == 0 ) /* lucky winner: us! */
	{	if(!(base = vmmaddress(size)) ) /* get a suitable base for the map */
			base = (Void_t*)mmvm;
		mmdc->flag |= MM_INIT;
		mmvm->base = base;
		mmvm->size = size;
		mmvm->busy = 0;
		mmvm->proj = mmdc->proj;
		strcpy(mmvm->file, mmdc->file);
		if(mmdc->proj < 0 ) /* flush to file */
			msync((Void_t*)mmvm, MMHEAD(mmvm->file), MS_SYNC);

		if(mmvm->base != (Void_t*)mmvm) /* not yet at the right address */
			if(!(mmvm = mmfix(mmvm, mmdc, fd)) )
				goto done;
		rv = 0; /* success, return this value to indicate a new map */
	}
	else /* wait for someone else to finish initialization */
	{	/**/ASSERT(!(mmdc->flag&MM_INIT));
		if(mmvm->magic != MM_LETMEDOIT && mmvm->magic != MM_MAGIC)
			goto done;

		for(try = 0, k = 0;; ASOLOOP(k) ) /* waiting */
		{	if(asocasint(&mmvm->magic, MM_MAGIC, MM_MAGIC) == MM_MAGIC )
				break;
			else if((try += 1) <= 0 ) /* too many tries */
				goto done;
		}

		/* mapped the wrong memory */
		if(mmvm->proj != mmdc->proj || strcmp(mmvm->file, mmdc->file) != 0 )
			goto done;

		if(mmvm->base != (Void_t*)mmvm) /* not yet at the right address */
			if(!(mmvm = mmfix(mmvm, mmdc, fd)) )
				goto done;
		rv = 1; /* success, return this value to indicate a finished map */
	}

done:	(void)close(fd);

	if(rv >= 0 ) /* successful construction of region */
	{	/**/ASSERT(mmvm && mmvm != (Mmvm_t*)(-1));
		mmdc->mmvm = mmvm;
	}
	else if(mmvm && mmvm != (Mmvm_t*)(-1)) /* error, remove map */
	{	if(mmdc->proj < 0)
			(void)munmap((Void_t*)mmvm, size);
		else	(void)shmdt((Void_t*)mmvm);
	}

	return rv;
}

#if __STD_C /* end a file mapping */
static int mmend(Mmdisc_t* mmdc)
#else
static int mmend(mmdc)
Mmdisc_t*	mmdc;
#endif
{
	Mmvm_t		*mmvm;
	struct shmid_ds	shmds;

	if(!(mmvm = mmdc->mmvm) )
		return 0;

	if(mmdc->proj < 0 )
	{	(void)msync(mmvm->base, mmvm->size, MS_ASYNC);
		if(mmdc->flag&MM_RELEASE)
		{	if(mmvm->base )
				(void)munmap(mmvm->base, mmvm->size);
		}
		if(mmdc->flag&MM_CLEANUP)
			(void)unlink(mmdc->file);
	}
	else
	{	if(mmdc->flag&MM_RELEASE)
		{	if(mmvm->base )
				(void)shmdt(mmvm->base);
		}
		if(mmdc->flag&MM_CLEANUP)
		{	if(mmdc->shmid >= 0 )
				(void)shmctl(mmdc->shmid, IPC_RMID, &shmds);
		}
	}

	mmdc->mmvm = NIL(Mmvm_t*);
	return 0;
}
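/* The discipline memory function below hands Vmbest exactly one extent: the
** data section MMDATA(mmvm) of size MMSIZE(mmvm). Vmbest then carves the
** application's vmalloc() requests out of that extent, so the discipline
** itself never needs to track more than one busy block.
*/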
#if __STD_C
static Void_t* mmgetmem(Vmalloc_t* vm, Void_t* caddr,
			size_t csize, size_t nsize, Vmdisc_t* disc)
#else
static Void_t* mmgetmem(vm, caddr, csize, nsize, disc)
Vmalloc_t*	vm;
Void_t*		caddr;
size_t		csize;
size_t		nsize;
Vmdisc_t*	disc;
#endif
{
	Mmvm_t		*mmvm;
	Mmdisc_t	*mmdc = (Mmdisc_t*)disc;

	if(!(mmvm = mmdc->mmvm) ) /* bad data */
		return NIL(Void_t*);

	/* this region allows only a single busy block! */
	if(caddr) /* resizing/freeing an existing block */
	{	if(caddr == MMDATA(mmvm) && nsize <= MMSIZE(mmvm) )
		{	mmvm->busy = nsize;
			return MMDATA(mmvm);
		}
		else	return NIL(Void_t*);
	}
	else /* requesting a new block */
	{	if(mmvm->busy == 0 )
		{	mmvm->busy = nsize;
			return MMDATA(mmvm);
		}
		else	return NIL(Void_t*);
	}
}

#if __STD_C
static int mmexcept(Vmalloc_t* vm, int type, Void_t* data, Vmdisc_t* disc)
#else
static int mmexcept(vm, type, data, disc)
Vmalloc_t*	vm;
int		type;
Void_t*		data;
Vmdisc_t*	disc;
#endif
{
	int		rv;
	Void_t		*base;
	Mmdisc_t	*mmdc = (Mmdisc_t*)disc;

	if(type == VM_OPEN)
	{	if(data) /* VM_OPEN event at start of vmopen() */
		{	if((rv = mminit(mmdc)) < 0 ) /* initialization failed */
				return -1;
			else if(rv == 0) /* just started a new map */
			{	/**/ASSERT(mmdc->flag&MM_INIT);
				/**/ASSERT(mmdc->mmvm->magic == MM_LETMEDOIT);
				return 0;
			}
			else /* an existing map was reconstructed */
			{	/**/ASSERT(!(mmdc->flag&MM_INIT));
				/**/ASSERT(mmdc->mmvm->magic == MM_MAGIC);
				*((Void_t**)data) = MMDATA(mmdc->mmvm);
				return 1;
			}
		}
		else	return 0;
	}
	else if(type == VM_ENDOPEN) /* at end of vmopen() */
	{	if(mmdc->flag&MM_INIT) /* this is the initializing process! */
		{	/**/ASSERT(mmdc->mmvm->magic == MM_LETMEDOIT);
			asocasint(&mmdc->mmvm->magic, MM_LETMEDOIT, MM_MAGIC);

			if(mmdc->proj < 0) /* sync data to file now */
				msync((Void_t*)mmdc->mmvm, MMHEAD(mmdc->file), MS_SYNC);
		} /**/ASSERT(mmdc->mmvm->magic == MM_MAGIC);
		return 0;
	}
	else if(type == VM_CLOSE)
		return 1; /* tell vmclose not to free memory segments */
	else if(type == VM_ENDCLOSE) /* this is the final closing event */
	{	(void)mmend(mmdc);
		(void)vmfree(Vmheap, mmdc);
		return 0; /* all done */
	}
	else	return 0;
}

#if __STD_C
Vmalloc_t* vmmopen(char* file, int proj, ssize_t size )
#else
Vmalloc_t* vmmopen(file, proj, size )
char*	file;	/* file for key or data backing	*/
int	proj;	/* project ID, < 0 doing mmap	*/
ssize_t	size;	/* desired size for mem segment	*/
#endif
{
	Vmalloc_t	*vm;
	Mmdisc_t	*mmdc;

	GETPAGESIZE(_Vmpagesize);

	if(!file || !file[0] )
		return NIL(Vmalloc_t*);

	/* create discipline structure for getting memory from mmap */
	if(!(mmdc = vmalloc(Vmheap, sizeof(Mmdisc_t)+strlen(file))) )
		return NIL(Vmalloc_t*);
	memset(mmdc, 0, sizeof(Mmdisc_t));
	mmdc->disc.memoryf = mmgetmem;
	mmdc->disc.exceptf = mmexcept;
	mmdc->disc.round = _Vmpagesize; /* round request to this size */
	mmdc->mmvm = NIL(Mmvm_t*);
	mmdc->size = size;
	mmdc->shmid = -1;
	mmdc->flag = 0;
	mmdc->proj = proj;
	strcpy(mmdc->file, file);

	/* now open the Vmalloc_t handle to return to application */
	if(!(vm = vmopen(&mmdc->disc, Vmbest, VM_SHARE)) )
	{	(void)mmend(mmdc);
		(void)vmfree(Vmheap, mmdc);
		return NIL(Vmalloc_t*);
	}
	else
	{	/**/ASSERT(mmdc->mmvm && mmdc->mmvm->magic == MM_MAGIC);
		return vm;
	}
}
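/* Example: using the (key,value) store below as a counter kept in the region
** (a sketch; the key COUNT_KEY and the casts are illustrative):
**
**	#define COUNT_KEY	1
**	long	n;
**
**	n = (long)vmmvalue(vm, COUNT_KEY, (Void_t*)1, VM_MMADD);	// add 1
**	n = (long)vmmvalue(vm, COUNT_KEY, NIL(Void_t*), VM_MMGET);	// read back
*/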
/* to store (key,value) data in the map */
#if __STD_C
Void_t* vmmvalue(Vmalloc_t* vm, int key, Void_t* val, int oper)
#else
Void_t* vmmvalue(vm, key, val, oper)
Vmalloc_t*	vm;	/* a region opened by vmmopen()	*/
int		key;	/* key of data to be set	*/
Void_t*		val;	/* data to be set		*/
int		oper;	/* operation type		*/
#endif
{
	Mmuser_t	*u;
	Mmdisc_t	*mmdc = (Mmdisc_t*)vm->disc;
	Mmvm_t		*mmvm = mmdc->mmvm;

	/* check to see if operation is well-defined */
	if(oper != VM_MMGET && oper != VM_MMSET && oper != VM_MMADD)
		return NIL(Void_t*);

	SETLOCK(vm, 0);

	/* find the key */
	for(u = mmvm->user; u; u = u->next)
		if(u->key == key)
			break;

	if(!u && (oper == VM_MMSET || oper == VM_MMADD) )
	{	if((u = KPVALLOC(vm, sizeof(Mmuser_t), vm->meth.allocf)) )
		{	u->val = NIL(Void_t*);
			u->key = key;
			u->next = mmvm->user;
			mmvm->user = u;
		}
	}

	if(u) /* update data and set value to return */
	{	if(oper == VM_MMSET)
			u->val = val;
		else if(oper == VM_MMADD)
			u->val = (Void_t*)((long)(u->val) + (long)(val));
		val = u->val;
	}
	else	val = NIL(Void_t*);

	CLRLOCK(vm, 0);

	return val;
}

void vmmrelease(Vmalloc_t* vm, int type)
{
	Mmdisc_t	*mmdc = (Mmdisc_t*)vm->disc;

	mmdc->flag |= MM_RELEASE;
	if(type > 0)
		mmdc->flag |= MM_CLEANUP;
}

/* suggest an address usable for mapping memory */
Void_t* vmmaddress(size_t size)
{
#if !defined(_map_min) || !defined(_map_max) || !defined(_map_dir)
	return NIL(Void_t*);
#else
	Void_t			*avail;
	static Vmuchar_t	*min = (Vmuchar_t*)_map_min;
	static Vmuchar_t	*max = (Vmuchar_t*)_map_max;

	GETPAGESIZE(_Vmpagesize);
	size = ROUND(size, _Vmpagesize);

	if(_map_dir == 0 || (min+size) > max)
		avail = NIL(Void_t*);
	else if(_map_dir > 0)
	{	avail = (Void_t*)min;
		min += size;
	}
	else
	{	max -= size;
		avail = (Void_t*)max;
	}

	return avail;
#endif
}

#endif