Lines Matching +full:activate +full:- +full:to +full:- +full:activate
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
5 * Copyright (c) 2008-2009, Lawrence Stewart <lstewart@freebsd.org>
6 * Copyright (c) 2009-2010, The FreeBSD Foundation
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87 #define ALQ_LOCK(alq) mtx_lock_spin(&(alq)->aq_mtx)
88 #define ALQ_UNLOCK(alq) mtx_unlock_spin(&(alq)->aq_mtx)
90 #define HAS_PENDING_DATA(alq) ((alq)->aq_freebytes != (alq)->aq_buflen)
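As a quick illustration of the accounting this macro relies on (the numbers below are hypothetical): aq_freebytes only returns to aq_buflen once every queued byte has been flushed, so the inequality doubles as an "is there anything left to write?" test.

/*
 * Hypothetical 4 KB queue with one 96-byte message buffered:
 *     aq_buflen    = 4096
 *     aq_freebytes = 4000
 * HAS_PENDING_DATA(alq) -> (4000 != 4096) -> true, so the ald_daemon
 * still has work to do; once alq_doio() writes the message out,
 * aq_freebytes is restored to 4096 and the macro evaluates false.
 */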
123 * Add a new queue to the global list. Fail if we're shutting down.
179 alq->aq_flags &= ~AQ_ACTIVE; in ald_deactivate()
240 /* Shutdown all ALQs prior to terminating the ald_daemon. */ in ald_shutdown()
251 * Wake ald_daemon so that it exits. It won't be able to do in ald_shutdown()
256 /* Wait for ald_daemon to exit. */ in ald_shutdown()
268 alq->aq_flags |= AQ_SHUTDOWN; in alq_shutdown()
272 * the ALQ_NOACTIVATE flag has been used), explicitly activate the in alq_shutdown()
275 if (!(alq->aq_flags & AQ_ACTIVE) && HAS_PENDING_DATA(alq)) { in alq_shutdown()
276 alq->aq_flags |= AQ_ACTIVE; in alq_shutdown()
285 while (alq->aq_flags & AQ_ACTIVE) { in alq_shutdown()
286 alq->aq_flags |= AQ_WANTED; in alq_shutdown()
287 msleep_spin(alq, &alq->aq_mtx, "aldclose", 0); in alq_shutdown()
291 vn_close(alq->aq_vp, FWRITE, alq->aq_cred, in alq_shutdown()
293 crfree(alq->aq_cred); in alq_shutdown()
302 mtx_destroy(&alq->aq_mtx); in alq_destroy()
303 free(alq->aq_entbuf, M_ALD); in alq_destroy()
308 * Flush all pending data to disk. This operation will block.
324 vp = alq->aq_vp; in alq_doio()
328 wrapearly = alq->aq_wrapearly; in alq_doio()
334 aiov[0].iov_base = alq->aq_entbuf + alq->aq_writetail; in alq_doio()
336 if (alq->aq_writetail < alq->aq_writehead) { in alq_doio()
338 totlen = aiov[0].iov_len = alq->aq_writehead - alq->aq_writetail; in alq_doio()
339 } else if (alq->aq_writehead == 0) { in alq_doio()
340 /* Buffer not wrapped (special case to avoid an empty iov). */ in alq_doio()
341 totlen = aiov[0].iov_len = alq->aq_buflen - alq->aq_writetail - in alq_doio()
346 * - first is from writetail to end of buffer in alq_doio()
347 * - second is from start of buffer to writehead in alq_doio()
349 aiov[0].iov_len = alq->aq_buflen - alq->aq_writetail - in alq_doio()
352 aiov[1].iov_base = alq->aq_entbuf; in alq_doio()
353 aiov[1].iov_len = alq->aq_writehead; in alq_doio()
357 alq->aq_flags |= AQ_FLUSHING; in alq_doio()
369 * Do all of the junk required to write now. in alq_doio()
377 if (mac_vnode_check_write(alq->aq_cred, NOCRED, vp) == 0) in alq_doio()
379 VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, alq->aq_cred); in alq_doio()
384 alq->aq_flags &= ~AQ_FLUSHING; in alq_doio()
387 alq->aq_writetail = (alq->aq_writetail + totlen + wrapearly) % in alq_doio()
388 alq->aq_buflen; in alq_doio()
389 alq->aq_freebytes += totlen + wrapearly; in alq_doio()
396 alq->aq_wrapearly = 0; in alq_doio()
399 * If we just flushed the buffer completely, reset indexes to 0 to in alq_doio()
401 * This is also required to ensure alq_getn() can't wedge itself. in alq_doio()
404 alq->aq_writehead = alq->aq_writetail = 0; in alq_doio()
406 KASSERT((alq->aq_writetail >= 0 && alq->aq_writetail < alq->aq_buflen), in alq_doio()
409 if (alq->aq_flags & AQ_WANTED) { in alq_doio()
410 alq->aq_flags &= ~AQ_WANTED; in alq_doio()
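To make the wrap handling in alq_doio() above concrete, here is a worked example with hypothetical values; the arithmetic follows the lines quoted above.

/*
 * Hypothetical state before the flush:
 *     aq_buflen = 4096, aq_writetail = 3900, aq_writehead = 200,
 *     aq_wrapearly = 0, aq_freebytes = 3700 (396 bytes pending).
 *
 * The pending data wraps past the end of the buffer, so two iovecs
 * are built:
 *     aiov[0]: aq_entbuf + 3900, len = 4096 - 3900 - 0 = 196
 *     aiov[1]: aq_entbuf,        len = aq_writehead   = 200
 *     totlen  = 196 + 200 = 396
 *
 * After VOP_WRITE() the tail advances modulo the buffer length:
 *     aq_writetail = (3900 + 396 + 0) % 4096 = 200
 * and aq_freebytes returns to 4096, so the buffer is empty and both
 * indexes are reset to 0, as the "flushed completely" comment notes.
 */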
457 alq->aq_vp = nd.ni_vp; in alq_open_flags()
458 alq->aq_cred = crhold(cred); in alq_open_flags()
460 mtx_init(&alq->aq_mtx, "ALD Queue", NULL, MTX_SPIN|MTX_QUIET); in alq_open_flags()
462 alq->aq_buflen = size; in alq_open_flags()
463 alq->aq_entmax = 0; in alq_open_flags()
464 alq->aq_entlen = 0; in alq_open_flags()
466 alq->aq_freebytes = alq->aq_buflen; in alq_open_flags()
467 alq->aq_entbuf = malloc(alq->aq_buflen, M_ALD, M_WAITOK|M_ZERO); in alq_open_flags()
468 alq->aq_writehead = alq->aq_writetail = 0; in alq_open_flags()
470 alq->aq_flags |= AQ_ORDERED; in alq_open_flags()
493 (*alqp)->aq_flags |= AQ_LEGACY; in alq_open()
494 (*alqp)->aq_entmax = count; in alq_open()
495 (*alqp)->aq_entlen = size; in alq_open()
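The fragments above come from the queue setup path. A minimal consumer-side sketch of opening a variable-length ALQ follows, assuming the alq(9) KPI declared in <sys/alq.h>; the log path, buffer size, cmode, and the my_alq/my_log_init names are made up for illustration.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/alq.h>

static struct alq *my_alq;		/* hypothetical consumer state */

static int
my_log_init(void)
{
	/*
	 * Variable-length-message queue backed by a 64 KB ring buffer;
	 * ALQ_ORDERED preserves the order of concurrent writers (it sets
	 * AQ_ORDERED, as seen above).  Returns 0 or an errno value.
	 */
	return (alq_open_flags(&my_alq, "/var/log/mylog.alq",
	    curthread->td_ucred, 0600, 64 * 1024, ALQ_ORDERED));
}

The legacy alq_open() wrapper instead takes a fixed entry size and a non-zero count, which is what sets AQ_LEGACY, aq_entmax, and aq_entlen above.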
510 int activate, copy, ret; in alq_writen() local
513 KASSERT((len > 0 && len <= alq->aq_buflen), in alq_writen()
516 activate = ret = 0; in alq_writen()
523 * Fail to perform the write and return EWOULDBLOCK if: in alq_writen()
524 * - The message is larger than our underlying buffer. in alq_writen()
525 * - The ALQ is being shutdown. in alq_writen()
526 * - There is insufficient free space in our underlying buffer in alq_writen()
527 * to accept the message and the user can't wait for space. in alq_writen()
528 * - There is insufficient free space in our underlying buffer in alq_writen()
529 * to accept the message and the alq is inactive due to prior in alq_writen()
530 * use of the ALQ_NOACTIVATE flag (which would lead to deadlock). in alq_writen()
532 if (len > alq->aq_buflen || in alq_writen()
533 alq->aq_flags & AQ_SHUTDOWN || in alq_writen()
534 (((flags & ALQ_NOWAIT) || (!(alq->aq_flags & AQ_ACTIVE) && in alq_writen()
535 HAS_PENDING_DATA(alq))) && alq->aq_freebytes < len)) { in alq_writen()
542 * waiting for resources to become available, sleep until we're woken. in alq_writen()
544 if (alq->aq_flags & AQ_ORDERED && alq->aq_waiters > 0) { in alq_writen()
547 alq->aq_waiters++; in alq_writen()
548 msleep_spin(&alq->aq_waiters, &alq->aq_mtx, "alqwnord", 0); in alq_writen()
549 alq->aq_waiters--; in alq_writen()
559 while (alq->aq_freebytes < len && !(alq->aq_flags & AQ_SHUTDOWN)) { in alq_writen()
562 alq->aq_flags |= AQ_WANTED; in alq_writen()
563 alq->aq_waiters++; in alq_writen()
566 msleep_spin(alq, &alq->aq_mtx, "alqwnres", 0); in alq_writen()
567 alq->aq_waiters--; in alq_writen()
570 * If we're the first thread to wake after an AQ_WANTED wakeup in alq_writen()
571 * but there isn't enough free space for us, we're going to loop in alq_writen()
576 if (alq->aq_waiters > 0 && !(alq->aq_flags & AQ_ORDERED) && in alq_writen()
577 alq->aq_freebytes < len && !(alq->aq_flags & AQ_WANTED)) in alq_writen()
584 * If there are waiters, we need to signal the waiting threads after we in alq_writen()
586 * requiring resources to be freed up. In the AQ_ORDERED case, threads in alq_writen()
587 * are not allowed to concurrently compete for resources in the above in alq_writen()
590 if (alq->aq_waiters > 0) { in alq_writen()
591 if (alq->aq_flags & AQ_ORDERED) in alq_writen()
592 waitchan = &alq->aq_waiters; in alq_writen()
599 if (alq->aq_flags & AQ_SHUTDOWN) { in alq_writen()
605 * If we need to wrap the buffer to accommodate the write, in alq_writen()
606 * we'll need 2 calls to bcopy. in alq_writen()
608 if ((alq->aq_buflen - alq->aq_writehead) < len) in alq_writen()
609 copy = alq->aq_buflen - alq->aq_writehead; in alq_writen()
611 /* Copy message (or part thereof if wrap required) to the buffer. */ in alq_writen()
612 bcopy(data, alq->aq_entbuf + alq->aq_writehead, copy); in alq_writen()
613 alq->aq_writehead += copy; in alq_writen()
615 if (alq->aq_writehead >= alq->aq_buflen) { in alq_writen()
616 KASSERT((alq->aq_writehead == alq->aq_buflen), in alq_writen()
617 ("%s: alq->aq_writehead (%d) > alq->aq_buflen (%d)", in alq_writen()
619 alq->aq_writehead, in alq_writen()
620 alq->aq_buflen)); in alq_writen()
621 alq->aq_writehead = 0; in alq_writen()
627 * to the start of the buffer and resetting aq_writehead. in alq_writen()
629 bcopy(((uint8_t *)data)+copy, alq->aq_entbuf, len - copy); in alq_writen()
630 alq->aq_writehead = len - copy; in alq_writen()
633 KASSERT((alq->aq_writehead >= 0 && alq->aq_writehead < alq->aq_buflen), in alq_writen()
636 alq->aq_freebytes -= len; in alq_writen()
638 if (!(alq->aq_flags & AQ_ACTIVE) && !(flags & ALQ_NOACTIVATE)) { in alq_writen()
639 alq->aq_flags |= AQ_ACTIVE; in alq_writen()
640 activate = 1; in alq_writen()
648 if (activate) { in alq_writen()
665 KASSERT((alq->aq_flags & AQ_LEGACY), in alq_write()
667 return (alq_writen(alq, data, alq->aq_entlen, flags)); in alq_write()
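A hedged sketch of the copying write path above, continuing the hypothetical my_alq handle from the earlier open sketch; the record layout, function name, and drop counter are made up, and <sys/errno.h> is assumed for EWOULDBLOCK.

struct my_record {			/* hypothetical fixed-layout record */
	uint32_t	event;
	uint32_t	arg;
};

static unsigned long my_drops;		/* hypothetical drop counter */

static void
my_log_event(uint32_t event, uint32_t arg)
{
	struct my_record rec = { .event = event, .arg = arg };

	/*
	 * ALQ_NOWAIT never sleeps: if the ring buffer is full, or the
	 * queue is shutting down, the write fails with EWOULDBLOCK and
	 * the record is dropped.  ALQ_WAITOK would sleep for space
	 * instead, subject to the deadlock caveat in the comment above.
	 */
	if (alq_writen(my_alq, &rec, sizeof(rec), ALQ_NOWAIT) == EWOULDBLOCK)
		my_drops++;
}

Passing ALQ_NOACTIVATE as well would leave the data buffered until an explicit alq_flush(my_alq); alq_write() is the fixed-length legacy wrapper, which (per the KASSERT above) requires a queue opened with alq_open() and a non-zero count.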
671 * Retrieve a pointer for the ALQ to write directly into, avoiding bcopy.
679 KASSERT((len > 0 && len <= alq->aq_buflen), in alq_getn()
680 ("%s: len <= 0 || len > alq->aq_buflen", __func__)); in alq_getn()
689 * the buffer is empty, they will both be set to 0 and therefore in alq_getn()
694 if (alq->aq_writehead <= alq->aq_writetail) in alq_getn()
695 contigbytes = alq->aq_freebytes; in alq_getn()
697 contigbytes = alq->aq_buflen - alq->aq_writehead; in alq_getn()
701 * Insufficient space at end of buffer to handle a in alq_getn()
704 * of the buffer which we will have to skip over when in alq_getn()
705 * flushing the buffer to disk. in alq_getn()
707 if (alq->aq_writetail >= len || flags & ALQ_WAITOK) { in alq_getn()
709 alq->aq_wrapearly = contigbytes; in alq_getn()
711 contigbytes = alq->aq_freebytes = in alq_getn()
712 alq->aq_writetail; in alq_getn()
713 alq->aq_writehead = 0; in alq_getn()
720 * - The message is larger than our underlying buffer. in alq_getn()
721 * - The ALQ is being shutdown. in alq_getn()
722 * - There is insufficient free space in our underlying buffer in alq_getn()
723 * to accept the message and the user can't wait for space. in alq_getn()
724 * - There is insufficient free space in our underlying buffer in alq_getn()
725 * to accept the message and the alq is inactive due to prior in alq_getn()
726 * use of the ALQ_NOACTIVATE flag (which would lead to deadlock). in alq_getn()
728 if (len > alq->aq_buflen || in alq_getn()
729 alq->aq_flags & AQ_SHUTDOWN || in alq_getn()
730 (((flags & ALQ_NOWAIT) || (!(alq->aq_flags & AQ_ACTIVE) && in alq_getn()
738 * waiting for resources to become available, sleep until we're woken. in alq_getn()
740 if (alq->aq_flags & AQ_ORDERED && alq->aq_waiters > 0) { in alq_getn()
743 alq->aq_waiters++; in alq_getn()
744 msleep_spin(&alq->aq_waiters, &alq->aq_mtx, "alqgnord", 0); in alq_getn()
745 alq->aq_waiters--; in alq_getn()
755 while (contigbytes < len && !(alq->aq_flags & AQ_SHUTDOWN)) { in alq_getn()
758 alq->aq_flags |= AQ_WANTED; in alq_getn()
759 alq->aq_waiters++; in alq_getn()
762 msleep_spin(alq, &alq->aq_mtx, "alqgnres", 0); in alq_getn()
763 alq->aq_waiters--; in alq_getn()
765 if (alq->aq_writehead <= alq->aq_writetail) in alq_getn()
766 contigbytes = alq->aq_freebytes; in alq_getn()
768 contigbytes = alq->aq_buflen - alq->aq_writehead; in alq_getn()
771 * If we're the first thread to wake after an AQ_WANTED wakeup in alq_getn()
772 * but there isn't enough free space for us, we're going to loop in alq_getn()
777 if (alq->aq_waiters > 0 && !(alq->aq_flags & AQ_ORDERED) && in alq_getn()
778 contigbytes < len && !(alq->aq_flags & AQ_WANTED)) in alq_getn()
785 * If there are waiters, we need to signal the waiting threads after we in alq_getn()
787 * requiring resources to be freed up. In the AQ_ORDERED case, threads in alq_getn()
788 * are not allowed to concurrently compete for resources in the above in alq_getn()
791 if (alq->aq_waiters > 0) { in alq_getn()
792 if (alq->aq_flags & AQ_ORDERED) in alq_getn()
793 waitchan = &alq->aq_waiters; in alq_getn()
800 if (alq->aq_flags & AQ_SHUTDOWN) { in alq_getn()
811 alq->aq_getpost.ae_data = alq->aq_entbuf + alq->aq_writehead; in alq_getn()
812 alq->aq_getpost.ae_bytesused = len; in alq_getn()
814 return (&alq->aq_getpost); in alq_getn()
821 KASSERT((alq->aq_flags & AQ_LEGACY), in alq_get()
823 return (alq_getn(alq, alq->aq_entlen, flags)); in alq_get()
829 int activate; in alq_post_flags() local
832 activate = 0; in alq_post_flags()
834 if (ale->ae_bytesused > 0) { in alq_post_flags()
835 if (!(alq->aq_flags & AQ_ACTIVE) && in alq_post_flags()
837 alq->aq_flags |= AQ_ACTIVE; in alq_post_flags()
838 activate = 1; in alq_post_flags()
841 alq->aq_writehead += ale->ae_bytesused; in alq_post_flags()
842 alq->aq_freebytes -= ale->ae_bytesused; in alq_post_flags()
844 /* Wrap aq_writehead if we filled to the end of the buffer. */ in alq_post_flags()
845 if (alq->aq_writehead == alq->aq_buflen) in alq_post_flags()
846 alq->aq_writehead = 0; in alq_post_flags()
848 KASSERT((alq->aq_writehead >= 0 && in alq_post_flags()
849 alq->aq_writehead < alq->aq_buflen), in alq_post_flags()
857 * If there are waiters, we need to signal the waiting threads after we in alq_post_flags()
859 * requiring resources to be freed up. In the AQ_ORDERED case, threads in alq_post_flags()
860 * are not allowed to concurrently compete for resources in the in alq_post_flags()
863 if (alq->aq_waiters > 0) { in alq_post_flags()
864 if (alq->aq_flags & AQ_ORDERED) in alq_post_flags()
865 waitchan = &alq->aq_waiters; in alq_post_flags()
873 if (activate) { in alq_post_flags()
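The get/post pair shown above supports a zero-copy variant of the same write path: alq_getn() hands back a pointer into the ring buffer and alq_post() (a wrapper around alq_post_flags()) commits the bytes actually used, waking the ald_daemon unless ALQ_NOACTIVATE is passed. A minimal sketch, again using the hypothetical my_alq; the 64-byte reservation and message format are illustrative, snprintf() comes from <sys/systm.h>, and note that the queue is left locked between the two calls in this implementation, so the formatting step must not sleep.

static void
my_log_fmt(int event_id)
{
	struct ale *ale;
	int len;

	/* Reserve 64 contiguous bytes; ALQ_WAITOK may sleep for space. */
	if ((ale = alq_getn(my_alq, 64, ALQ_WAITOK)) == NULL)
		return;

	/* Format straight into the ring buffer (no intermediate copy). */
	len = snprintf(ale->ae_data, 64, "event %d\n", event_id);

	/* ae_bytesused is preset to the reserved length; commit only
	 * what was actually used before posting. */
	ale->ae_bytesused = (len < 64) ? len : 64;
	alq_post(my_alq, ale);
}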
893 * Pull the lever iff there is data to flush and we're in alq_flush()
896 if (HAS_PENDING_DATA(alq) && !(alq->aq_flags & AQ_FLUSHING)) { in alq_flush()
897 if (alq->aq_flags & AQ_ACTIVE) in alq_flush()
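Finally, for completeness of the hypothetical consumer sketched above: alq_close() runs the shutdown path quoted earlier (drain any remaining data via the ald_daemon, then vn_close() the backing vnode), so an explicit alq_flush() beforehand is not required.

static void
my_log_fini(void)
{
	if (my_alq != NULL) {
		alq_close(my_alq);	/* drains pending data, closes the file */
		my_alq = NULL;
	}
}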