Lines Matching defs:lock_request

299 lock_descriptor_t *lock_request;
337 lock_request = &stack_lock_request;
338 (void) bzero((caddr_t)lock_request,
345 lock_request->l_edge.edge_in_next = &lock_request->l_edge;
346 lock_request->l_edge.edge_in_prev = &lock_request->l_edge;
347 lock_request->l_edge.edge_adj_next = &lock_request->l_edge;
348 lock_request->l_edge.edge_adj_prev = &lock_request->l_edge;
349 lock_request->l_status = FLK_INITIAL_STATE;
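
The four self-assignments at 345-348 give the fresh descriptor an empty edge sentinel: a list node whose next and prev pointers refer back to itself is the conventional empty circular list, so the request starts out with no incoming or adjacent edges in the lock graph. A minimal user-space sketch of the same pattern, using simplified stand-in types rather than the real flock definitions:

    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-ins for the kernel's edge/descriptor types. */
    typedef struct edge {
        struct edge *edge_in_next;      /* incoming-edge list */
        struct edge *edge_in_prev;
        struct edge *edge_adj_next;     /* adjacency (outgoing) list */
        struct edge *edge_adj_prev;
    } edge_t;

    typedef struct lock_descriptor {
        edge_t  l_edge;
        int     l_status;
    } lock_descriptor_t;

    #define FLK_INITIAL_STATE 1         /* illustrative value only */

    /* Point every link of the sentinel at itself: both lists are empty. */
    static void
    lock_init(lock_descriptor_t *lock)
    {
        (void) memset(lock, 0, sizeof (*lock));
        lock->l_edge.edge_in_next = &lock->l_edge;
        lock->l_edge.edge_in_prev = &lock->l_edge;
        lock->l_edge.edge_adj_next = &lock->l_edge;
        lock->l_edge.edge_adj_prev = &lock->l_edge;
        lock->l_status = FLK_INITIAL_STATE;
    }

    int
    main(void)
    {
        lock_descriptor_t stack_lock_request;

        lock_init(&stack_lock_request);
        /* An empty list is detected by the sentinel pointing at itself. */
        printf("in-list empty:  %d\n",
            stack_lock_request.l_edge.edge_in_next ==
            &stack_lock_request.l_edge);
        printf("adj-list empty: %d\n",
            stack_lock_request.l_edge.edge_adj_next ==
            &stack_lock_request.l_edge);
        return (0);
    }
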
351 lock_request = flk_get_lock();
352 fp->f_filock = (struct filock *)lock_request;
354 lock_request->l_state = 0;
355 lock_request->l_vnode = vp;
356 lock_request->l_zoneid = getzoneid();
357 lock_request->l_ofd = fp;
363 error = flk_convert_lock_data(vp, lckdat, &lock_request->l_start,
364 &lock_request->l_end, offset);
368 error = flk_check_lock_data(lock_request->l_start, lock_request->l_end,
373 ASSERT(lock_request->l_end >= lock_request->l_start);
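
Lines 363-368 convert the caller-supplied flock into an absolute byte range and validate it, and the ASSERT at 373 records the resulting invariant that the end never precedes the start. A hedged sketch of what such a validation step can look like; the parameter order mirrors the flk_check_lock_data() call, but the body is an assumption, not the kernel implementation:

    #include <errno.h>
    #include <stdio.h>

    typedef unsigned long long u_offset_t;      /* stand-in for the kernel type */
    #define MAXEND ((u_offset_t)~0ULL >> 1)     /* illustrative upper bound */

    /*
     * Reject a range whose start or end lies beyond the allowed maximum,
     * or whose end precedes its start.  Returns 0 or EINVAL.
     */
    static int
    check_lock_range(u_offset_t start, u_offset_t end, u_offset_t max)
    {
        if (start > max || end > max || end < start)
            return (EINVAL);
        return (0);
    }

    int
    main(void)
    {
        printf("valid:   %d\n", check_lock_range(0, 99, MAXEND));
        printf("invalid: %d\n", check_lock_range(100, 99, MAXEND));
        return (0);
    }
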
375 lock_request->l_type = lckdat->l_type;
377 lock_request->l_state |= WILLING_TO_SLEEP_LOCK;
380 if (lock_request->l_type == F_RDLCK ||
381 lock_request->l_type == F_WRLCK)
382 lock_request->l_state |= QUERY_LOCK;
384 lock_request->l_flock = (*lckdat);
390 if (fcmd != F_OFD_GETLK && lock_request->l_type != F_UNLCK &&
401 lock_request->l_state |= REFERENCED_LOCK;
402 lock_request->l_graph = gp;
404 switch (lock_request->l_type) {
407 if (IS_QUERY_LOCK(lock_request)) {
408 flk_get_first_blocking_lock(lock_request);
409 if (lock_request->l_ofd != NULL)
410 lock_request->l_flock.l_pid = -1;
411 (*lckdat) = lock_request->l_flock;
414 error = flk_process_request(lock_request);
420 error = flk_execute_request(lock_request);
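
The switch at 404-420 separates three outcomes: a GETLK-style query returns the first blocking lock (reporting l_pid as -1 for OFD locks, which are not owned by a single process), a read or write lock is run through the graph with flk_process_request(), and an unlock is carried out directly with flk_execute_request(). A simplified sketch of that dispatch shape; the structure and helper bodies below are stand-ins, not the kernel routines:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/types.h>

    /* Hypothetical, flattened request record; not the kernel structure. */
    struct lock_req {
        short   l_type;         /* F_RDLCK, F_WRLCK, F_UNLCK */
        int     is_query;       /* GETLK-style request */
        int     is_ofd;         /* OFD lock: no owning pid */
        pid_t   blocker_pid;    /* filled in for queries */
    };

    /* Stand-ins for the graph routines that flock.c provides. */
    static void find_first_blocker(struct lock_req *r) { r->blocker_pid = 1234; }
    static int  process_request(struct lock_req *r)    { (void) r; return (0); }
    static int  execute_request(struct lock_req *r)    { (void) r; return (0); }

    static int
    dispatch(struct lock_req *req)
    {
        switch (req->l_type) {
        case F_RDLCK:
        case F_WRLCK:
            if (req->is_query) {
                /* GETLK: report the first conflicting lock. */
                find_first_blocker(req);
                if (req->is_ofd)
                    req->blocker_pid = -1;  /* OFD locks carry no pid */
                return (0);
            }
            /* SETLK/SETLKW: run the request through the lock graph. */
            return (process_request(req));
        case F_UNLCK:
            /* Unlocks take effect immediately. */
            return (execute_request(req));
        default:
            return (-1);
        }
    }

    int
    main(void)
    {
        struct lock_req q = { F_RDLCK, 1, 1, 0 };

        (void) dispatch(&q);
        printf("query blocker pid: %d\n", (int)q.blocker_pid);
        return (0);
    }
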
428 if (lock_request == &stack_lock_request) {
429 flk_set_state(lock_request, FLK_DEAD_STATE);
431 lock_request->l_state &= ~REFERENCED_LOCK;
432 if ((error != 0) || IS_DELETED(lock_request)) {
433 flk_set_state(lock_request, FLK_DEAD_STATE);
434 flk_free_lock(lock_request);
445 flk_set_state(lock_request, FLK_DEAD_STATE);
446 if (lock_request != &stack_lock_request)
447 flk_free_lock(lock_request);
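
The teardown at 428-447 encodes the descriptor's ownership rule: a stack-allocated request is only marked FLK_DEAD_STATE, while one obtained from flk_get_lock() has its REFERENCED_LOCK hold dropped and is freed once the request failed or the lock has been deleted from the graph. A sketch of that rule with simplified stand-ins (flag and state values are illustrative):

    #include <stdlib.h>

    /* Simplified descriptor; only the fields the cleanup rule touches. */
    typedef struct lock_desc {
        int     l_state;        /* flag bits */
        int     l_status;       /* life-cycle state */
        int     deleted;        /* stand-in for IS_DELETED() */
    } lock_desc_t;

    #define REFERENCED_LOCK 0x01    /* illustrative bit */
    #define FLK_DEAD_STATE  4       /* illustrative state */

    static void
    cleanup(lock_desc_t *req, lock_desc_t *stack_req, int error)
    {
        if (req == stack_req) {
            /* Stack descriptor: nothing to free, just mark it dead. */
            req->l_status = FLK_DEAD_STATE;
            return;
        }
        /* Heap descriptor: drop our reference ... */
        req->l_state &= ~REFERENCED_LOCK;
        /* ... and free it if the request failed or it left the graph. */
        if (error != 0 || req->deleted) {
            req->l_status = FLK_DEAD_STATE;
            free(req);
        }
    }

    int
    main(void)
    {
        lock_desc_t stack_req = { 0, 0, 0 };
        lock_desc_t *heap_req = calloc(1, sizeof (*heap_req));

        cleanup(&stack_req, &stack_req, 0);     /* marked dead, not freed */
        if (heap_req != NULL) {
            heap_req->deleted = 1;
            cleanup(heap_req, &stack_req, 0);   /* freed */
        }
        return (0);
    }
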
539 lock_descriptor_t *lock_request;
558 lock_request = &stack_lock_request;
559 (void) bzero((caddr_t)lock_request,
567 lock_request->l_edge.edge_in_next = &lock_request->l_edge;
568 lock_request->l_edge.edge_in_prev = &lock_request->l_edge;
569 lock_request->l_edge.edge_adj_next = &lock_request->l_edge;
570 lock_request->l_edge.edge_adj_prev = &lock_request->l_edge;
571 lock_request->l_status = FLK_INITIAL_STATE;
573 lock_request = flk_get_lock();
575 lock_request->l_state = 0;
576 lock_request->l_vnode = vp;
577 lock_request->l_zoneid = getzoneid();
588 lock_request->l_start = lckdat->l_start;
589 lock_request->l_end = (lckdat->l_len == 0) ? MAX_U_OFFSET_T :
594 &lock_request->l_start, &lock_request->l_end,
599 error = flk_check_lock_data(lock_request->l_start,
600 lock_request->l_end, MAXEND);
606 ASSERT(lock_request->l_end >= lock_request->l_start);
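
Line 589 applies the POSIX convention for lock lengths: l_len == 0 means "from l_start to the largest representable offset", which the code models as MAX_U_OFFSET_T. A minimal sketch of that end computation; the non-zero branch, truncated in the listing, is assumed to be the usual last-byte formula start + len - 1:

    #include <stdio.h>

    typedef unsigned long long u_offset_t;          /* stand-in type */
    #define MAX_U_OFFSET_T ((u_offset_t)~0ULL)      /* illustrative value */

    /*
     * A zero length locks through the end of the file, represented as the
     * maximum offset; otherwise the last locked byte is start + len - 1.
     */
    static u_offset_t
    lock_end(u_offset_t start, u_offset_t len)
    {
        return ((len == 0) ? MAX_U_OFFSET_T : start + (len - 1));
    }

    int
    main(void)
    {
        printf("end of [100, len 10): %llu\n", lock_end(100, 10));
        printf("end of [100, len 0):  %llu\n", lock_end(100, 0));
        return (0);
    }
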
608 lock_request->l_type = lckdat->l_type;
610 lock_request->l_state |= IO_LOCK;
612 lock_request->l_state |= WILLING_TO_SLEEP_LOCK;
614 lock_request->l_state |= LOCKMGR_LOCK;
616 lock_request->l_state |= NBMAND_LOCK;
626 lock_request->l_state |= PXFS_LOCK;
629 if (lock_request->l_type == F_RDLCK ||
630 lock_request->l_type == F_WRLCK)
631 lock_request->l_state |= QUERY_LOCK;
633 lock_request->l_flock = (*lckdat);
634 lock_request->l_callbacks = flk_cbp;
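
The assignments at 608-634 show that l_state is a bit mask describing the request: IO_LOCK, WILLING_TO_SLEEP_LOCK, LOCKMGR_LOCK, NBMAND_LOCK, PXFS_LOCK and QUERY_LOCK are OR-ed in according to who issued the request and how it should behave, and the IS_QUERY_LOCK()/IS_LOCKMGR()/IS_IO_LOCK() tests used further down read those bits back. A sketch of the pattern with illustrative bit values, not the real flock header constants:

    #include <stdio.h>

    /* Illustrative flag bits; the real values live in the flock headers. */
    #define IO_LOCK                 0x0001
    #define WILLING_TO_SLEEP_LOCK   0x0002
    #define LOCKMGR_LOCK            0x0004
    #define NBMAND_LOCK             0x0008
    #define PXFS_LOCK               0x0010
    #define QUERY_LOCK              0x0020
    #define REFERENCED_LOCK         0x0040

    typedef struct lock_desc {
        int     l_state;
    } lock_desc_t;

    /* Predicate macros mirroring the IS_*() tests seen in the listing. */
    #define IS_QUERY_LOCK(l)    (((l)->l_state & QUERY_LOCK) != 0)
    #define IS_LOCKMGR(l)       (((l)->l_state & LOCKMGR_LOCK) != 0)
    #define IS_IO_LOCK(l)       (((l)->l_state & IO_LOCK) != 0)

    int
    main(void)
    {
        lock_desc_t req = { 0 };

        req.l_state |= WILLING_TO_SLEEP_LOCK;   /* blocking request */
        req.l_state |= QUERY_LOCK;              /* GETLK-style query */
        printf("query:   %d\n", IS_QUERY_LOCK(&req));
        printf("lockmgr: %d\n", IS_LOCKMGR(&req));
        printf("io lock: %d\n", IS_IO_LOCK(&req));
        return (0);
    }
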
639 if (IS_LOCKMGR(lock_request)) {
656 nlmid = GETNLMID(lock_request->l_flock.l_sysid);
693 if (IS_IO_LOCK(lock_request)) {
695 (lock_request->l_type == F_RDLCK) ?
700 lock_request->l_state |= REFERENCED_LOCK;
701 lock_request->l_graph = gp;
703 switch (lock_request->l_type) {
706 if (IS_QUERY_LOCK(lock_request)) {
707 flk_get_first_blocking_lock(lock_request);
708 if (lock_request->l_ofd != NULL)
709 lock_request->l_flock.l_pid = -1;
710 (*lckdat) = lock_request->l_flock;
716 error = flk_process_request(lock_request);
722 if (IS_LOCKMGR(lock_request) &&
723 flk_canceled(lock_request)) {
726 error = flk_execute_request(lock_request);
737 if (lock_request->l_flock.l_sysid == 0) {
745 flk_delete_locks_by_sysid(lock_request);
746 lock_request->l_state &= ~REFERENCED_LOCK;
747 flk_set_state(lock_request, FLK_DEAD_STATE);
748 flk_free_lock(lock_request);
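
Around 737-748 a lock-manager request can stand for an entire remote client: flk_delete_locks_by_sysid() drops every lock held under the request's sysid, after which the request descriptor itself is released. A rough sketch of the "remove all locks owned by one sysid" idea, using a plain singly linked list instead of the real lock graph:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified active-lock record; the real code walks a lock graph. */
    typedef struct lock_node {
        int                 l_sysid;    /* owning remote client */
        struct lock_node    *next;
    } lock_node_t;

    /* Unlink and free every lock whose sysid matches the request's. */
    static void
    delete_locks_by_sysid(lock_node_t **head, int sysid)
    {
        lock_node_t **pp = head;

        while (*pp != NULL) {
            if ((*pp)->l_sysid == sysid) {
                lock_node_t *dead = *pp;
                *pp = dead->next;
                free(dead);
            } else {
                pp = &(*pp)->next;
            }
        }
    }

    int
    main(void)
    {
        lock_node_t *head = NULL;
        int sysids[] = { 7, 3, 7, 9 };

        for (int i = 3; i >= 0; i--) {
            lock_node_t *n = malloc(sizeof (*n));
            if (n == NULL)
                return (1);
            n->l_sysid = sysids[i];
            n->next = head;
            head = n;
        }
        delete_locks_by_sysid(&head, 7);
        for (lock_node_t *n = head; n != NULL; n = n->next)
            printf("remaining sysid: %d\n", n->l_sysid);
        return (0);
    }
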
759 lock_request->l_state &= ~REFERENCED_LOCK;
769 if (IS_IO_LOCK(lock_request)) {
771 (lock_request->l_type == F_RDLCK) ?
782 flk_wakeup(lock_request, 1);
783 flk_set_state(lock_request, FLK_DEAD_STATE);
784 flk_free_lock(lock_request);
795 if (lock_request == &stack_lock_request) {
796 flk_set_state(lock_request, FLK_DEAD_STATE);
798 lock_request->l_state &= ~REFERENCED_LOCK;
799 if ((error != 0) || IS_DELETED(lock_request)) {
800 flk_set_state(lock_request, FLK_DEAD_STATE);
801 flk_free_lock(lock_request);
809 flk_set_state(lock_request, FLK_DEAD_STATE);
810 if (lock_request != &stack_lock_request)
811 flk_free_lock(lock_request);
4119 * and ending points, which are put into lock_request. Returns 0 or an
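
The fragment at 4119 comes from the comment above flk_convert_lock_data(), which resolves the caller's l_whence-relative l_start into the absolute starting (and ending) offsets stored in the request and returns 0 or an error number. A hedged sketch of the whence resolution under the usual fcntl(2) rules (SEEK_SET from zero, SEEK_CUR from the current file offset, SEEK_END from the file size); the real routine also takes the vnode, derives the end as sketched earlier, and checks for overflow, all of which is omitted here:

    #include <errno.h>
    #include <fcntl.h>      /* struct flock, SEEK_SET/SEEK_CUR/SEEK_END */
    #include <stdio.h>

    /*
     * Resolve the whence-relative l_start of a struct flock into an
     * absolute starting offset.  A simplified model of the conversion.
     */
    static int
    resolve_lock_start(const struct flock *ld, long long file_offset,
        long long file_size, long long *startp)
    {
        long long base;

        switch (ld->l_whence) {
        case SEEK_SET:  base = 0;           break;  /* from start of file */
        case SEEK_CUR:  base = file_offset; break;  /* from current offset */
        case SEEK_END:  base = file_size;   break;  /* from end of file */
        default:        return (EINVAL);
        }
        if (base + ld->l_start < 0)
            return (EINVAL);    /* a lock may not begin before offset 0 */
        *startp = base + ld->l_start;
        return (0);
    }

    int
    main(void)
    {
        struct flock ld = { .l_type = F_RDLCK, .l_whence = SEEK_CUR,
            .l_start = 10, .l_len = 100 };
        long long start;

        if (resolve_lock_start(&ld, 500, 4096, &start) == 0)
            printf("absolute start: %lld\n", start);
        return (0);
    }
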