1 /*- 2 * Copyright (c) 2002 Doug Rabson 3 * Copyright (c) 1994-1995 S�ren Schmidt 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer 11 * in this position and unchanged. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. The name of the author may not be used to endorse or promote products 16 * derived from this software without specific prior written permission 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_mac.h" 34 35 #include <sys/param.h> 36 #include <sys/blist.h> 37 #include <sys/fcntl.h> 38 #if defined(__i386__) || defined(__alpha__) 39 #include <sys/imgact_aout.h> 40 #endif 41 #include <sys/jail.h> 42 #include <sys/kernel.h> 43 #include <sys/limits.h> 44 #include <sys/lock.h> 45 #include <sys/mac.h> 46 #include <sys/malloc.h> 47 #include <sys/mman.h> 48 #include <sys/mount.h> 49 #include <sys/mutex.h> 50 #include <sys/namei.h> 51 #include <sys/proc.h> 52 #include <sys/reboot.h> 53 #include <sys/resourcevar.h> 54 #include <sys/signalvar.h> 55 #include <sys/stat.h> 56 #include <sys/syscallsubr.h> 57 #include <sys/sysctl.h> 58 #include <sys/sysproto.h> 59 #include <sys/systm.h> 60 #include <sys/time.h> 61 #include <sys/vmmeter.h> 62 #include <sys/vnode.h> 63 #include <sys/wait.h> 64 65 #include <vm/vm.h> 66 #include <vm/pmap.h> 67 #include <vm/vm_kern.h> 68 #include <vm/vm_map.h> 69 #include <vm/vm_extern.h> 70 #include <vm/vm_object.h> 71 #include <vm/swap_pager.h> 72 73 #include <posix4/sched.h> 74 75 #include "opt_compat.h" 76 77 #ifdef COMPAT_LINUX32 78 #include <machine/../linux32/linux.h> 79 #include <machine/../linux32/linux32_proto.h> 80 #else 81 #include <machine/../linux/linux.h> 82 #include <machine/../linux/linux_proto.h> 83 #endif 84 85 #include <compat/linux/linux_mib.h> 86 #include <compat/linux/linux_util.h> 87 88 #ifdef __i386__ 89 #include <machine/cputypes.h> 90 #endif 91 92 #ifdef __alpha__ 93 #define BSD_TO_LINUX_SIGNAL(sig) (sig) 94 #else 95 #define BSD_TO_LINUX_SIGNAL(sig) \ 96 (((sig) <= LINUX_SIGTBLSZ) ? bsd_to_linux_signal[_SIG_IDX(sig)] : sig) 97 #endif 98 99 #ifndef __alpha__ 100 static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = { 101 RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK, 102 RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE, 103 RLIMIT_MEMLOCK, -1 104 }; 105 #endif /*!__alpha__*/ 106 107 struct l_sysinfo { 108 l_long uptime; /* Seconds since boot */ 109 l_ulong loads[3]; /* 1, 5, and 15 minute load averages */ 110 #define LINUX_SYSINFO_LOADS_SCALE 65536 111 l_ulong totalram; /* Total usable main memory size */ 112 l_ulong freeram; /* Available memory size */ 113 l_ulong sharedram; /* Amount of shared memory */ 114 l_ulong bufferram; /* Memory used by buffers */ 115 l_ulong totalswap; /* Total swap space size */ 116 l_ulong freeswap; /* swap space still available */ 117 l_ushort procs; /* Number of current processes */ 118 l_ulong totalbig; 119 l_ulong freebig; 120 l_uint mem_unit; 121 char _f[6]; /* Pads structure to 64 bytes */ 122 }; 123 #ifndef __alpha__ 124 int 125 linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args) 126 { 127 struct l_sysinfo sysinfo; 128 vm_object_t object; 129 int i, j; 130 struct timespec ts; 131 132 /* Uptime is copied out of print_uptime() in kern_shutdown.c */ 133 getnanouptime(&ts); 134 i = 0; 135 if (ts.tv_sec >= 86400) { 136 ts.tv_sec %= 86400; 137 i = 1; 138 } 139 if (i || ts.tv_sec >= 3600) { 140 ts.tv_sec %= 3600; 141 i = 1; 142 } 143 if (i || ts.tv_sec >= 60) { 144 ts.tv_sec %= 60; 145 i = 1; 146 } 147 sysinfo.uptime=ts.tv_sec; 148 149 /* Use the information from the mib to get our load averages */ 150 for (i = 0; i < 3; i++) 151 sysinfo.loads[i] = averunnable.ldavg[i] * 152 LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale; 153 154 sysinfo.totalram = physmem * PAGE_SIZE; 155 sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE; 156 157 sysinfo.sharedram = 0; 158 mtx_lock(&vm_object_list_mtx); 159 TAILQ_FOREACH(object, &vm_object_list, object_list) 160 if (object->shadow_count > 1) 161 sysinfo.sharedram += object->resident_page_count; 162 mtx_unlock(&vm_object_list_mtx); 163 164 sysinfo.sharedram *= PAGE_SIZE; 165 sysinfo.bufferram = 0; 166 167 swap_pager_status(&i, &j); 168 sysinfo.totalswap= i * PAGE_SIZE; 169 sysinfo.freeswap = (i - j) * PAGE_SIZE; 170 171 sysinfo.procs = nprocs; 172 173 /* The following are only present in newer Linux kernels. */ 174 sysinfo.totalbig = 0; 175 sysinfo.freebig = 0; 176 sysinfo.mem_unit = 1; 177 178 return copyout(&sysinfo, args->info, sizeof(sysinfo)); 179 } 180 #endif /*!__alpha__*/ 181 182 #ifndef __alpha__ 183 int 184 linux_alarm(struct thread *td, struct linux_alarm_args *args) 185 { 186 struct itimerval it, old_it; 187 struct timeval tv; 188 struct proc *p; 189 190 #ifdef DEBUG 191 if (ldebug(alarm)) 192 printf(ARGS(alarm, "%u"), args->secs); 193 #endif 194 195 if (args->secs > 100000000) 196 return EINVAL; 197 198 it.it_value.tv_sec = (long)args->secs; 199 it.it_value.tv_usec = 0; 200 it.it_interval.tv_sec = 0; 201 it.it_interval.tv_usec = 0; 202 p = td->td_proc; 203 PROC_LOCK(p); 204 old_it = p->p_realtimer; 205 getmicrouptime(&tv); 206 if (timevalisset(&old_it.it_value)) 207 callout_stop(&p->p_itcallout); 208 if (it.it_value.tv_sec != 0) { 209 callout_reset(&p->p_itcallout, tvtohz(&it.it_value), 210 realitexpire, p); 211 timevaladd(&it.it_value, &tv); 212 } 213 p->p_realtimer = it; 214 PROC_UNLOCK(p); 215 if (timevalcmp(&old_it.it_value, &tv, >)) { 216 timevalsub(&old_it.it_value, &tv); 217 if (old_it.it_value.tv_usec != 0) 218 old_it.it_value.tv_sec++; 219 td->td_retval[0] = old_it.it_value.tv_sec; 220 } 221 return 0; 222 } 223 #endif /*!__alpha__*/ 224 225 int 226 linux_brk(struct thread *td, struct linux_brk_args *args) 227 { 228 struct vmspace *vm = td->td_proc->p_vmspace; 229 vm_offset_t new, old; 230 struct obreak_args /* { 231 char * nsize; 232 } */ tmp; 233 234 #ifdef DEBUG 235 if (ldebug(brk)) 236 printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend); 237 #endif 238 old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize); 239 new = (vm_offset_t)args->dsend; 240 tmp.nsize = (char *) new; 241 if (((caddr_t)new > vm->vm_daddr) && !obreak(td, &tmp)) 242 td->td_retval[0] = (long)new; 243 else 244 td->td_retval[0] = (long)old; 245 246 return 0; 247 } 248 249 #if defined(__i386__) || defined(__alpha__) 250 251 int 252 linux_uselib(struct thread *td, struct linux_uselib_args *args) 253 { 254 struct nameidata ni; 255 struct vnode *vp; 256 struct exec *a_out; 257 struct vattr attr; 258 vm_offset_t vmaddr; 259 unsigned long file_offset; 260 vm_offset_t buffer; 261 unsigned long bss_size; 262 char *library; 263 int error; 264 int locked; 265 266 LCONVPATHEXIST(td, args->library, &library); 267 268 #ifdef DEBUG 269 if (ldebug(uselib)) 270 printf(ARGS(uselib, "%s"), library); 271 #endif 272 273 a_out = NULL; 274 locked = 0; 275 vp = NULL; 276 277 /* 278 * XXX: This code should make use of vn_open(), rather than doing 279 * all this stuff itself. 280 */ 281 NDINIT(&ni, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, library, td); 282 error = namei(&ni); 283 LFREEPATH(library); 284 if (error) 285 goto cleanup; 286 287 vp = ni.ni_vp; 288 /* 289 * XXX - This looks like a bogus check. A LOCKLEAF namei should not 290 * succeed without returning a vnode. 291 */ 292 if (vp == NULL) { 293 error = ENOEXEC; /* ?? */ 294 goto cleanup; 295 } 296 NDFREE(&ni, NDF_ONLY_PNBUF); 297 298 /* 299 * From here on down, we have a locked vnode that must be unlocked. 300 */ 301 locked++; 302 303 /* Writable? */ 304 if (vp->v_writecount) { 305 error = ETXTBSY; 306 goto cleanup; 307 } 308 309 /* Executable? */ 310 error = VOP_GETATTR(vp, &attr, td->td_ucred, td); 311 if (error) 312 goto cleanup; 313 314 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) || 315 ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) { 316 error = ENOEXEC; 317 goto cleanup; 318 } 319 320 /* Sensible size? */ 321 if (attr.va_size == 0) { 322 error = ENOEXEC; 323 goto cleanup; 324 } 325 326 /* Can we access it? */ 327 error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td); 328 if (error) 329 goto cleanup; 330 331 /* 332 * XXX: This should use vn_open() so that it is properly authorized, 333 * and to reduce code redundancy all over the place here. 334 */ 335 #ifdef MAC 336 error = mac_check_vnode_open(td->td_ucred, vp, FREAD); 337 if (error) 338 goto cleanup; 339 #endif 340 error = VOP_OPEN(vp, FREAD, td->td_ucred, td, -1); 341 if (error) 342 goto cleanup; 343 344 /* Pull in executable header into kernel_map */ 345 error = vm_mmap(kernel_map, (vm_offset_t *)&a_out, PAGE_SIZE, 346 VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp, 0); 347 /* 348 * Lock no longer needed 349 */ 350 locked = 0; 351 VOP_UNLOCK(vp, 0, td); 352 353 if (error) 354 goto cleanup; 355 356 /* Is it a Linux binary ? */ 357 if (((a_out->a_magic >> 16) & 0xff) != 0x64) { 358 error = ENOEXEC; 359 goto cleanup; 360 } 361 362 /* 363 * While we are here, we should REALLY do some more checks 364 */ 365 366 /* Set file/virtual offset based on a.out variant. */ 367 switch ((int)(a_out->a_magic & 0xffff)) { 368 case 0413: /* ZMAGIC */ 369 file_offset = 1024; 370 break; 371 case 0314: /* QMAGIC */ 372 file_offset = 0; 373 break; 374 default: 375 error = ENOEXEC; 376 goto cleanup; 377 } 378 379 bss_size = round_page(a_out->a_bss); 380 381 /* Check various fields in header for validity/bounds. */ 382 if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) { 383 error = ENOEXEC; 384 goto cleanup; 385 } 386 387 /* text + data can't exceed file size */ 388 if (a_out->a_data + a_out->a_text > attr.va_size) { 389 error = EFAULT; 390 goto cleanup; 391 } 392 393 /* 394 * text/data/bss must not exceed limits 395 * XXX - this is not complete. it should check current usage PLUS 396 * the resources needed by this library. 397 */ 398 PROC_LOCK(td->td_proc); 399 if (a_out->a_text > maxtsiz || 400 a_out->a_data + bss_size > lim_cur(td->td_proc, RLIMIT_DATA)) { 401 PROC_UNLOCK(td->td_proc); 402 error = ENOMEM; 403 goto cleanup; 404 } 405 PROC_UNLOCK(td->td_proc); 406 407 mp_fixme("Unlocked vflags access."); 408 /* prevent more writers */ 409 vp->v_vflag |= VV_TEXT; 410 411 /* 412 * Check if file_offset page aligned. Currently we cannot handle 413 * misalinged file offsets, and so we read in the entire image 414 * (what a waste). 415 */ 416 if (file_offset & PAGE_MASK) { 417 #ifdef DEBUG 418 printf("uselib: Non page aligned binary %lu\n", file_offset); 419 #endif 420 /* Map text+data read/write/execute */ 421 422 /* a_entry is the load address and is page aligned */ 423 vmaddr = trunc_page(a_out->a_entry); 424 425 /* get anon user mapping, read+write+execute */ 426 error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0, 427 &vmaddr, a_out->a_text + a_out->a_data, FALSE, VM_PROT_ALL, 428 VM_PROT_ALL, 0); 429 if (error) 430 goto cleanup; 431 432 /* map file into kernel_map */ 433 error = vm_mmap(kernel_map, &buffer, 434 round_page(a_out->a_text + a_out->a_data + file_offset), 435 VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp, 436 trunc_page(file_offset)); 437 if (error) 438 goto cleanup; 439 440 /* copy from kernel VM space to user space */ 441 error = copyout(PTRIN(buffer + file_offset), 442 (void *)vmaddr, a_out->a_text + a_out->a_data); 443 444 /* release temporary kernel space */ 445 vm_map_remove(kernel_map, buffer, buffer + 446 round_page(a_out->a_text + a_out->a_data + file_offset)); 447 448 if (error) 449 goto cleanup; 450 } else { 451 #ifdef DEBUG 452 printf("uselib: Page aligned binary %lu\n", file_offset); 453 #endif 454 /* 455 * for QMAGIC, a_entry is 20 bytes beyond the load address 456 * to skip the executable header 457 */ 458 vmaddr = trunc_page(a_out->a_entry); 459 460 /* 461 * Map it all into the process's space as a single 462 * copy-on-write "data" segment. 463 */ 464 error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr, 465 a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL, 466 MAP_PRIVATE | MAP_FIXED, (caddr_t)vp, file_offset); 467 if (error) 468 goto cleanup; 469 } 470 #ifdef DEBUG 471 printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long*)vmaddr)[0], 472 ((long*)vmaddr)[1]); 473 #endif 474 if (bss_size != 0) { 475 /* Calculate BSS start address */ 476 vmaddr = trunc_page(a_out->a_entry) + a_out->a_text + 477 a_out->a_data; 478 479 /* allocate some 'anon' space */ 480 error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0, 481 &vmaddr, bss_size, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0); 482 if (error) 483 goto cleanup; 484 } 485 486 cleanup: 487 /* Unlock vnode if needed */ 488 if (locked) 489 VOP_UNLOCK(vp, 0, td); 490 491 /* Release the kernel mapping. */ 492 if (a_out) 493 vm_map_remove(kernel_map, (vm_offset_t)a_out, 494 (vm_offset_t)a_out + PAGE_SIZE); 495 496 return error; 497 } 498 499 #endif /* __i386__ || __alpha__ */ 500 501 int 502 linux_select(struct thread *td, struct linux_select_args *args) 503 { 504 l_timeval ltv; 505 struct timeval tv0, tv1, utv, *tvp; 506 int error; 507 508 #ifdef DEBUG 509 if (ldebug(select)) 510 printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds, 511 (void *)args->readfds, (void *)args->writefds, 512 (void *)args->exceptfds, (void *)args->timeout); 513 #endif 514 515 /* 516 * Store current time for computation of the amount of 517 * time left. 518 */ 519 if (args->timeout) { 520 if ((error = copyin(args->timeout, <v, sizeof(ltv)))) 521 goto select_out; 522 utv.tv_sec = ltv.tv_sec; 523 utv.tv_usec = ltv.tv_usec; 524 #ifdef DEBUG 525 if (ldebug(select)) 526 printf(LMSG("incoming timeout (%ld/%ld)"), 527 utv.tv_sec, utv.tv_usec); 528 #endif 529 530 if (itimerfix(&utv)) { 531 /* 532 * The timeval was invalid. Convert it to something 533 * valid that will act as it does under Linux. 534 */ 535 utv.tv_sec += utv.tv_usec / 1000000; 536 utv.tv_usec %= 1000000; 537 if (utv.tv_usec < 0) { 538 utv.tv_sec -= 1; 539 utv.tv_usec += 1000000; 540 } 541 if (utv.tv_sec < 0) 542 timevalclear(&utv); 543 } 544 microtime(&tv0); 545 tvp = &utv; 546 } else 547 tvp = NULL; 548 549 error = kern_select(td, args->nfds, args->readfds, args->writefds, 550 args->exceptfds, tvp); 551 552 #ifdef DEBUG 553 if (ldebug(select)) 554 printf(LMSG("real select returns %d"), error); 555 #endif 556 if (error) { 557 /* 558 * See fs/select.c in the Linux kernel. Without this, 559 * Maelstrom doesn't work. 560 */ 561 if (error == ERESTART) 562 error = EINTR; 563 goto select_out; 564 } 565 566 if (args->timeout) { 567 if (td->td_retval[0]) { 568 /* 569 * Compute how much time was left of the timeout, 570 * by subtracting the current time and the time 571 * before we started the call, and subtracting 572 * that result from the user-supplied value. 573 */ 574 microtime(&tv1); 575 timevalsub(&tv1, &tv0); 576 timevalsub(&utv, &tv1); 577 if (utv.tv_sec < 0) 578 timevalclear(&utv); 579 } else 580 timevalclear(&utv); 581 #ifdef DEBUG 582 if (ldebug(select)) 583 printf(LMSG("outgoing timeout (%ld/%ld)"), 584 utv.tv_sec, utv.tv_usec); 585 #endif 586 ltv.tv_sec = utv.tv_sec; 587 ltv.tv_usec = utv.tv_usec; 588 if ((error = copyout(<v, args->timeout, sizeof(ltv)))) 589 goto select_out; 590 } 591 592 select_out: 593 #ifdef DEBUG 594 if (ldebug(select)) 595 printf(LMSG("select_out -> %d"), error); 596 #endif 597 return error; 598 } 599 600 int 601 linux_mremap(struct thread *td, struct linux_mremap_args *args) 602 { 603 struct munmap_args /* { 604 void *addr; 605 size_t len; 606 } */ bsd_args; 607 int error = 0; 608 609 #ifdef DEBUG 610 if (ldebug(mremap)) 611 printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"), 612 (void *)(uintptr_t)args->addr, 613 (unsigned long)args->old_len, 614 (unsigned long)args->new_len, 615 (unsigned long)args->flags); 616 #endif 617 args->new_len = round_page(args->new_len); 618 args->old_len = round_page(args->old_len); 619 620 if (args->new_len > args->old_len) { 621 td->td_retval[0] = 0; 622 return ENOMEM; 623 } 624 625 if (args->new_len < args->old_len) { 626 bsd_args.addr = 627 (caddr_t)((uintptr_t)args->addr + args->new_len); 628 bsd_args.len = args->old_len - args->new_len; 629 error = munmap(td, &bsd_args); 630 } 631 632 td->td_retval[0] = error ? 0 : (uintptr_t)args->addr; 633 return error; 634 } 635 636 #define LINUX_MS_ASYNC 0x0001 637 #define LINUX_MS_INVALIDATE 0x0002 638 #define LINUX_MS_SYNC 0x0004 639 640 int 641 linux_msync(struct thread *td, struct linux_msync_args *args) 642 { 643 struct msync_args bsd_args; 644 645 bsd_args.addr = (caddr_t)(uintptr_t)args->addr; 646 bsd_args.len = (uintptr_t)args->len; 647 bsd_args.flags = args->fl & ~LINUX_MS_SYNC; 648 649 return msync(td, &bsd_args); 650 } 651 652 #ifndef __alpha__ 653 int 654 linux_time(struct thread *td, struct linux_time_args *args) 655 { 656 struct timeval tv; 657 l_time_t tm; 658 int error; 659 660 #ifdef DEBUG 661 if (ldebug(time)) 662 printf(ARGS(time, "*")); 663 #endif 664 665 microtime(&tv); 666 tm = tv.tv_sec; 667 if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm)))) 668 return error; 669 td->td_retval[0] = tm; 670 return 0; 671 } 672 #endif /*!__alpha__*/ 673 674 struct l_times_argv { 675 l_long tms_utime; 676 l_long tms_stime; 677 l_long tms_cutime; 678 l_long tms_cstime; 679 }; 680 681 #ifdef __alpha__ 682 #define CLK_TCK 1024 /* Linux uses 1024 on alpha */ 683 #else 684 #define CLK_TCK 100 /* Linux uses 100 */ 685 #endif 686 687 #define CONVTCK(r) (r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK)) 688 689 int 690 linux_times(struct thread *td, struct linux_times_args *args) 691 { 692 struct timeval tv, utime, stime, cutime, cstime; 693 struct l_times_argv tms; 694 struct proc *p; 695 int error; 696 697 #ifdef DEBUG 698 if (ldebug(times)) 699 printf(ARGS(times, "*")); 700 #endif 701 702 p = td->td_proc; 703 PROC_LOCK(p); 704 calcru(p, &utime, &stime); 705 calccru(p, &cutime, &cstime); 706 PROC_UNLOCK(p); 707 708 tms.tms_utime = CONVTCK(utime); 709 tms.tms_stime = CONVTCK(stime); 710 711 tms.tms_cutime = CONVTCK(cutime); 712 tms.tms_cstime = CONVTCK(cstime); 713 714 if ((error = copyout(&tms, args->buf, sizeof(tms)))) 715 return error; 716 717 microuptime(&tv); 718 td->td_retval[0] = (int)CONVTCK(tv); 719 return 0; 720 } 721 722 int 723 linux_newuname(struct thread *td, struct linux_newuname_args *args) 724 { 725 struct l_new_utsname utsname; 726 char osname[LINUX_MAX_UTSNAME]; 727 char osrelease[LINUX_MAX_UTSNAME]; 728 char *p; 729 730 #ifdef DEBUG 731 if (ldebug(newuname)) 732 printf(ARGS(newuname, "*")); 733 #endif 734 735 linux_get_osname(td, osname); 736 linux_get_osrelease(td, osrelease); 737 738 bzero(&utsname, sizeof(utsname)); 739 strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME); 740 getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME); 741 strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME); 742 strlcpy(utsname.version, version, LINUX_MAX_UTSNAME); 743 for (p = utsname.version; *p != '\0'; ++p) 744 if (*p == '\n') { 745 *p = '\0'; 746 break; 747 } 748 #ifdef __i386__ 749 { 750 const char *class; 751 switch (cpu_class) { 752 case CPUCLASS_686: 753 class = "i686"; 754 break; 755 case CPUCLASS_586: 756 class = "i586"; 757 break; 758 case CPUCLASS_486: 759 class = "i486"; 760 break; 761 default: 762 class = "i386"; 763 } 764 strlcpy(utsname.machine, class, LINUX_MAX_UTSNAME); 765 } 766 #elif defined(__amd64__) /* XXX: Linux can change 'personality'. */ 767 #ifdef COMPAT_LINUX32 768 strlcpy(utsname.machine, "i686", LINUX_MAX_UTSNAME); 769 #else 770 strlcpy(utsname.machine, "x86_64", LINUX_MAX_UTSNAME); 771 #endif /* COMPAT_LINUX32 */ 772 #else /* something other than i386 or amd64 - assume we and Linux agree */ 773 strlcpy(utsname.machine, machine, LINUX_MAX_UTSNAME); 774 #endif /* __i386__ */ 775 strlcpy(utsname.domainname, domainname, LINUX_MAX_UTSNAME); 776 777 return (copyout(&utsname, args->buf, sizeof(utsname))); 778 } 779 780 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 781 struct l_utimbuf { 782 l_time_t l_actime; 783 l_time_t l_modtime; 784 }; 785 786 int 787 linux_utime(struct thread *td, struct linux_utime_args *args) 788 { 789 struct timeval tv[2], *tvp; 790 struct l_utimbuf lut; 791 char *fname; 792 int error; 793 794 LCONVPATHEXIST(td, args->fname, &fname); 795 796 #ifdef DEBUG 797 if (ldebug(utime)) 798 printf(ARGS(utime, "%s, *"), fname); 799 #endif 800 801 if (args->times) { 802 if ((error = copyin(args->times, &lut, sizeof lut))) { 803 LFREEPATH(fname); 804 return error; 805 } 806 tv[0].tv_sec = lut.l_actime; 807 tv[0].tv_usec = 0; 808 tv[1].tv_sec = lut.l_modtime; 809 tv[1].tv_usec = 0; 810 tvp = tv; 811 } else 812 tvp = NULL; 813 814 error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE); 815 LFREEPATH(fname); 816 return (error); 817 } 818 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 819 820 #define __WCLONE 0x80000000 821 822 #ifndef __alpha__ 823 int 824 linux_waitpid(struct thread *td, struct linux_waitpid_args *args) 825 { 826 int error, options, tmpstat; 827 828 #ifdef DEBUG 829 if (ldebug(waitpid)) 830 printf(ARGS(waitpid, "%d, %p, %d"), 831 args->pid, (void *)args->status, args->options); 832 #endif 833 834 options = (args->options & (WNOHANG | WUNTRACED)); 835 /* WLINUXCLONE should be equal to __WCLONE, but we make sure */ 836 if (args->options & __WCLONE) 837 options |= WLINUXCLONE; 838 839 error = kern_wait(td, args->pid, &tmpstat, options, NULL); 840 if (error) 841 return error; 842 843 if (args->status) { 844 tmpstat &= 0xffff; 845 if (WIFSIGNALED(tmpstat)) 846 tmpstat = (tmpstat & 0xffffff80) | 847 BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat)); 848 else if (WIFSTOPPED(tmpstat)) 849 tmpstat = (tmpstat & 0xffff00ff) | 850 (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8); 851 return copyout(&tmpstat, args->status, sizeof(int)); 852 } 853 854 return 0; 855 } 856 #endif /*!__alpha__*/ 857 858 int 859 linux_wait4(struct thread *td, struct linux_wait4_args *args) 860 { 861 int error, options, tmpstat; 862 struct rusage ru, *rup; 863 struct proc *p; 864 865 #ifdef DEBUG 866 if (ldebug(wait4)) 867 printf(ARGS(wait4, "%d, %p, %d, %p"), 868 args->pid, (void *)args->status, args->options, 869 (void *)args->rusage); 870 #endif 871 872 options = (args->options & (WNOHANG | WUNTRACED)); 873 /* WLINUXCLONE should be equal to __WCLONE, but we make sure */ 874 if (args->options & __WCLONE) 875 options |= WLINUXCLONE; 876 877 if (args->rusage != NULL) 878 rup = &ru; 879 else 880 rup = NULL; 881 error = kern_wait(td, args->pid, &tmpstat, options, rup); 882 if (error) 883 return error; 884 885 p = td->td_proc; 886 PROC_LOCK(p); 887 SIGDELSET(p->p_siglist, SIGCHLD); 888 PROC_UNLOCK(p); 889 890 if (args->status) { 891 tmpstat &= 0xffff; 892 if (WIFSIGNALED(tmpstat)) 893 tmpstat = (tmpstat & 0xffffff80) | 894 BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat)); 895 else if (WIFSTOPPED(tmpstat)) 896 tmpstat = (tmpstat & 0xffff00ff) | 897 (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8); 898 error = copyout(&tmpstat, args->status, sizeof(int)); 899 } 900 if (args->rusage != NULL && error == 0) 901 error = copyout(&ru, args->rusage, sizeof(ru)); 902 903 return (error); 904 } 905 906 int 907 linux_mknod(struct thread *td, struct linux_mknod_args *args) 908 { 909 char *path; 910 int error; 911 912 LCONVPATHCREAT(td, args->path, &path); 913 914 #ifdef DEBUG 915 if (ldebug(mknod)) 916 printf(ARGS(mknod, "%s, %d, %d"), path, args->mode, args->dev); 917 #endif 918 919 if (args->mode & S_IFIFO) 920 error = kern_mkfifo(td, path, UIO_SYSSPACE, args->mode); 921 else 922 error = kern_mknod(td, path, UIO_SYSSPACE, args->mode, 923 args->dev); 924 LFREEPATH(path); 925 return (error); 926 } 927 928 /* 929 * UGH! This is just about the dumbest idea I've ever heard!! 930 */ 931 int 932 linux_personality(struct thread *td, struct linux_personality_args *args) 933 { 934 #ifdef DEBUG 935 if (ldebug(personality)) 936 printf(ARGS(personality, "%lu"), (unsigned long)args->per); 937 #endif 938 #ifndef __alpha__ 939 if (args->per != 0) 940 return EINVAL; 941 #endif 942 943 /* Yes Jim, it's still a Linux... */ 944 td->td_retval[0] = 0; 945 return 0; 946 } 947 948 struct l_itimerval { 949 l_timeval it_interval; 950 l_timeval it_value; 951 }; 952 953 #define B2L_ITIMERVAL(bip, lip) \ 954 (bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec; \ 955 (bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec; \ 956 (bip)->it_value.tv_sec = (lip)->it_value.tv_sec; \ 957 (bip)->it_value.tv_usec = (lip)->it_value.tv_usec; 958 959 int 960 linux_setitimer(struct thread *td, struct linux_setitimer_args *uap) 961 { 962 int error; 963 struct l_itimerval ls; 964 struct itimerval aitv, oitv; 965 966 #ifdef DEBUG 967 if (ldebug(setitimer)) 968 printf(ARGS(setitimer, "%p, %p"), 969 (void *)uap->itv, (void *)uap->oitv); 970 #endif 971 972 if (uap->itv == NULL) { 973 uap->itv = uap->oitv; 974 return (linux_getitimer(td, (struct linux_getitimer_args *)uap)); 975 } 976 977 error = copyin(uap->itv, &ls, sizeof(ls)); 978 if (error != 0) 979 return (error); 980 B2L_ITIMERVAL(&aitv, &ls); 981 #ifdef DEBUG 982 if (ldebug(setitimer)) { 983 printf("setitimer: value: sec: %ld, usec: %ld\n", 984 aitv.it_value.tv_sec, aitv.it_value.tv_usec); 985 printf("setitimer: interval: sec: %ld, usec: %ld\n", 986 aitv.it_interval.tv_sec, aitv.it_interval.tv_usec); 987 } 988 #endif 989 error = kern_setitimer(td, uap->which, &aitv, &oitv); 990 if (error != 0 || uap->oitv == NULL) 991 return (error); 992 B2L_ITIMERVAL(&ls, &oitv); 993 994 return (copyout(&ls, uap->oitv, sizeof(ls))); 995 } 996 997 int 998 linux_getitimer(struct thread *td, struct linux_getitimer_args *uap) 999 { 1000 int error; 1001 struct l_itimerval ls; 1002 struct itimerval aitv; 1003 1004 #ifdef DEBUG 1005 if (ldebug(getitimer)) 1006 printf(ARGS(getitimer, "%p"), (void *)uap->itv); 1007 #endif 1008 error = kern_getitimer(td, uap->which, &aitv); 1009 if (error != 0) 1010 return (error); 1011 B2L_ITIMERVAL(&ls, &aitv); 1012 return (copyout(&ls, uap->itv, sizeof(ls))); 1013 } 1014 1015 #ifndef __alpha__ 1016 int 1017 linux_nice(struct thread *td, struct linux_nice_args *args) 1018 { 1019 struct setpriority_args bsd_args; 1020 1021 bsd_args.which = PRIO_PROCESS; 1022 bsd_args.who = 0; /* current process */ 1023 bsd_args.prio = args->inc; 1024 return setpriority(td, &bsd_args); 1025 } 1026 #endif /*!__alpha__*/ 1027 1028 int 1029 linux_setgroups(struct thread *td, struct linux_setgroups_args *args) 1030 { 1031 struct ucred *newcred, *oldcred; 1032 l_gid_t linux_gidset[NGROUPS]; 1033 gid_t *bsd_gidset; 1034 int ngrp, error; 1035 struct proc *p; 1036 1037 ngrp = args->gidsetsize; 1038 if (ngrp < 0 || ngrp >= NGROUPS) 1039 return (EINVAL); 1040 error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t)); 1041 if (error) 1042 return (error); 1043 newcred = crget(); 1044 p = td->td_proc; 1045 PROC_LOCK(p); 1046 oldcred = p->p_ucred; 1047 1048 /* 1049 * cr_groups[0] holds egid. Setting the whole set from 1050 * the supplied set will cause egid to be changed too. 1051 * Keep cr_groups[0] unchanged to prevent that. 1052 */ 1053 1054 if ((error = suser_cred(oldcred, SUSER_ALLOWJAIL)) != 0) { 1055 PROC_UNLOCK(p); 1056 crfree(newcred); 1057 return (error); 1058 } 1059 1060 crcopy(newcred, oldcred); 1061 if (ngrp > 0) { 1062 newcred->cr_ngroups = ngrp + 1; 1063 1064 bsd_gidset = newcred->cr_groups; 1065 ngrp--; 1066 while (ngrp >= 0) { 1067 bsd_gidset[ngrp + 1] = linux_gidset[ngrp]; 1068 ngrp--; 1069 } 1070 } 1071 else 1072 newcred->cr_ngroups = 1; 1073 1074 setsugid(p); 1075 p->p_ucred = newcred; 1076 PROC_UNLOCK(p); 1077 crfree(oldcred); 1078 return (0); 1079 } 1080 1081 int 1082 linux_getgroups(struct thread *td, struct linux_getgroups_args *args) 1083 { 1084 struct ucred *cred; 1085 l_gid_t linux_gidset[NGROUPS]; 1086 gid_t *bsd_gidset; 1087 int bsd_gidsetsz, ngrp, error; 1088 1089 cred = td->td_ucred; 1090 bsd_gidset = cred->cr_groups; 1091 bsd_gidsetsz = cred->cr_ngroups - 1; 1092 1093 /* 1094 * cr_groups[0] holds egid. Returning the whole set 1095 * here will cause a duplicate. Exclude cr_groups[0] 1096 * to prevent that. 1097 */ 1098 1099 if ((ngrp = args->gidsetsize) == 0) { 1100 td->td_retval[0] = bsd_gidsetsz; 1101 return (0); 1102 } 1103 1104 if (ngrp < bsd_gidsetsz) 1105 return (EINVAL); 1106 1107 ngrp = 0; 1108 while (ngrp < bsd_gidsetsz) { 1109 linux_gidset[ngrp] = bsd_gidset[ngrp + 1]; 1110 ngrp++; 1111 } 1112 1113 if ((error = copyout(linux_gidset, args->grouplist, 1114 ngrp * sizeof(l_gid_t)))) 1115 return (error); 1116 1117 td->td_retval[0] = ngrp; 1118 return (0); 1119 } 1120 1121 #ifndef __alpha__ 1122 int 1123 linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args) 1124 { 1125 struct rlimit bsd_rlim; 1126 struct l_rlimit rlim; 1127 u_int which; 1128 int error; 1129 1130 #ifdef DEBUG 1131 if (ldebug(setrlimit)) 1132 printf(ARGS(setrlimit, "%d, %p"), 1133 args->resource, (void *)args->rlim); 1134 #endif 1135 1136 if (args->resource >= LINUX_RLIM_NLIMITS) 1137 return (EINVAL); 1138 1139 which = linux_to_bsd_resource[args->resource]; 1140 if (which == -1) 1141 return (EINVAL); 1142 1143 error = copyin(args->rlim, &rlim, sizeof(rlim)); 1144 if (error) 1145 return (error); 1146 1147 bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur; 1148 bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max; 1149 return (kern_setrlimit(td, which, &bsd_rlim)); 1150 } 1151 1152 int 1153 linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args) 1154 { 1155 struct l_rlimit rlim; 1156 struct proc *p = td->td_proc; 1157 struct rlimit bsd_rlim; 1158 u_int which; 1159 1160 #ifdef DEBUG 1161 if (ldebug(old_getrlimit)) 1162 printf(ARGS(old_getrlimit, "%d, %p"), 1163 args->resource, (void *)args->rlim); 1164 #endif 1165 1166 if (args->resource >= LINUX_RLIM_NLIMITS) 1167 return (EINVAL); 1168 1169 which = linux_to_bsd_resource[args->resource]; 1170 if (which == -1) 1171 return (EINVAL); 1172 1173 PROC_LOCK(p); 1174 lim_rlimit(p, which, &bsd_rlim); 1175 PROC_UNLOCK(p); 1176 1177 #ifdef COMPAT_LINUX32 1178 rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur; 1179 if (rlim.rlim_cur == UINT_MAX) 1180 rlim.rlim_cur = INT_MAX; 1181 rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max; 1182 if (rlim.rlim_max == UINT_MAX) 1183 rlim.rlim_max = INT_MAX; 1184 #else 1185 rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur; 1186 if (rlim.rlim_cur == ULONG_MAX) 1187 rlim.rlim_cur = LONG_MAX; 1188 rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max; 1189 if (rlim.rlim_max == ULONG_MAX) 1190 rlim.rlim_max = LONG_MAX; 1191 #endif 1192 return (copyout(&rlim, args->rlim, sizeof(rlim))); 1193 } 1194 1195 int 1196 linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args) 1197 { 1198 struct l_rlimit rlim; 1199 struct proc *p = td->td_proc; 1200 struct rlimit bsd_rlim; 1201 u_int which; 1202 1203 #ifdef DEBUG 1204 if (ldebug(getrlimit)) 1205 printf(ARGS(getrlimit, "%d, %p"), 1206 args->resource, (void *)args->rlim); 1207 #endif 1208 1209 if (args->resource >= LINUX_RLIM_NLIMITS) 1210 return (EINVAL); 1211 1212 which = linux_to_bsd_resource[args->resource]; 1213 if (which == -1) 1214 return (EINVAL); 1215 1216 PROC_LOCK(p); 1217 lim_rlimit(p, which, &bsd_rlim); 1218 PROC_UNLOCK(p); 1219 1220 rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur; 1221 rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max; 1222 return (copyout(&rlim, args->rlim, sizeof(rlim))); 1223 } 1224 #endif /*!__alpha__*/ 1225 1226 int 1227 linux_sched_setscheduler(struct thread *td, 1228 struct linux_sched_setscheduler_args *args) 1229 { 1230 struct sched_setscheduler_args bsd; 1231 1232 #ifdef DEBUG 1233 if (ldebug(sched_setscheduler)) 1234 printf(ARGS(sched_setscheduler, "%d, %d, %p"), 1235 args->pid, args->policy, (const void *)args->param); 1236 #endif 1237 1238 switch (args->policy) { 1239 case LINUX_SCHED_OTHER: 1240 bsd.policy = SCHED_OTHER; 1241 break; 1242 case LINUX_SCHED_FIFO: 1243 bsd.policy = SCHED_FIFO; 1244 break; 1245 case LINUX_SCHED_RR: 1246 bsd.policy = SCHED_RR; 1247 break; 1248 default: 1249 return EINVAL; 1250 } 1251 1252 bsd.pid = args->pid; 1253 bsd.param = (struct sched_param *)args->param; 1254 return sched_setscheduler(td, &bsd); 1255 } 1256 1257 int 1258 linux_sched_getscheduler(struct thread *td, 1259 struct linux_sched_getscheduler_args *args) 1260 { 1261 struct sched_getscheduler_args bsd; 1262 int error; 1263 1264 #ifdef DEBUG 1265 if (ldebug(sched_getscheduler)) 1266 printf(ARGS(sched_getscheduler, "%d"), args->pid); 1267 #endif 1268 1269 bsd.pid = args->pid; 1270 error = sched_getscheduler(td, &bsd); 1271 1272 switch (td->td_retval[0]) { 1273 case SCHED_OTHER: 1274 td->td_retval[0] = LINUX_SCHED_OTHER; 1275 break; 1276 case SCHED_FIFO: 1277 td->td_retval[0] = LINUX_SCHED_FIFO; 1278 break; 1279 case SCHED_RR: 1280 td->td_retval[0] = LINUX_SCHED_RR; 1281 break; 1282 } 1283 1284 return error; 1285 } 1286 1287 int 1288 linux_sched_get_priority_max(struct thread *td, 1289 struct linux_sched_get_priority_max_args *args) 1290 { 1291 struct sched_get_priority_max_args bsd; 1292 1293 #ifdef DEBUG 1294 if (ldebug(sched_get_priority_max)) 1295 printf(ARGS(sched_get_priority_max, "%d"), args->policy); 1296 #endif 1297 1298 switch (args->policy) { 1299 case LINUX_SCHED_OTHER: 1300 bsd.policy = SCHED_OTHER; 1301 break; 1302 case LINUX_SCHED_FIFO: 1303 bsd.policy = SCHED_FIFO; 1304 break; 1305 case LINUX_SCHED_RR: 1306 bsd.policy = SCHED_RR; 1307 break; 1308 default: 1309 return EINVAL; 1310 } 1311 return sched_get_priority_max(td, &bsd); 1312 } 1313 1314 int 1315 linux_sched_get_priority_min(struct thread *td, 1316 struct linux_sched_get_priority_min_args *args) 1317 { 1318 struct sched_get_priority_min_args bsd; 1319 1320 #ifdef DEBUG 1321 if (ldebug(sched_get_priority_min)) 1322 printf(ARGS(sched_get_priority_min, "%d"), args->policy); 1323 #endif 1324 1325 switch (args->policy) { 1326 case LINUX_SCHED_OTHER: 1327 bsd.policy = SCHED_OTHER; 1328 break; 1329 case LINUX_SCHED_FIFO: 1330 bsd.policy = SCHED_FIFO; 1331 break; 1332 case LINUX_SCHED_RR: 1333 bsd.policy = SCHED_RR; 1334 break; 1335 default: 1336 return EINVAL; 1337 } 1338 return sched_get_priority_min(td, &bsd); 1339 } 1340 1341 #define REBOOT_CAD_ON 0x89abcdef 1342 #define REBOOT_CAD_OFF 0 1343 #define REBOOT_HALT 0xcdef0123 1344 1345 int 1346 linux_reboot(struct thread *td, struct linux_reboot_args *args) 1347 { 1348 struct reboot_args bsd_args; 1349 1350 #ifdef DEBUG 1351 if (ldebug(reboot)) 1352 printf(ARGS(reboot, "0x%x"), args->cmd); 1353 #endif 1354 if (args->cmd == REBOOT_CAD_ON || args->cmd == REBOOT_CAD_OFF) 1355 return (0); 1356 bsd_args.opt = (args->cmd == REBOOT_HALT) ? RB_HALT : 0; 1357 return (reboot(td, &bsd_args)); 1358 } 1359 1360 #ifndef __alpha__ 1361 1362 /* 1363 * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify 1364 * td->td_retval[1] when COMPAT_43 is defined. This 1365 * globbers registers that are assumed to be preserved. The following 1366 * lightweight syscalls fixes this. See also linux_getgid16() and 1367 * linux_getuid16() in linux_uid16.c. 1368 * 1369 * linux_getpid() - MP SAFE 1370 * linux_getgid() - MP SAFE 1371 * linux_getuid() - MP SAFE 1372 */ 1373 1374 int 1375 linux_getpid(struct thread *td, struct linux_getpid_args *args) 1376 { 1377 1378 td->td_retval[0] = td->td_proc->p_pid; 1379 return (0); 1380 } 1381 1382 int 1383 linux_getgid(struct thread *td, struct linux_getgid_args *args) 1384 { 1385 1386 td->td_retval[0] = td->td_ucred->cr_rgid; 1387 return (0); 1388 } 1389 1390 int 1391 linux_getuid(struct thread *td, struct linux_getuid_args *args) 1392 { 1393 1394 td->td_retval[0] = td->td_ucred->cr_ruid; 1395 return (0); 1396 } 1397 1398 #endif /*!__alpha__*/ 1399 1400 int 1401 linux_getsid(struct thread *td, struct linux_getsid_args *args) 1402 { 1403 struct getsid_args bsd; 1404 bsd.pid = args->pid; 1405 return getsid(td, &bsd); 1406 } 1407