/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "member.h"
#include "lock.h"
#include "dir.h"
#include "config.h"
#include "requestqueue.h"

struct rq_entry {
	struct list_head list;
	int nodeid;
	char request[1];
};

/*
 * Requests received while the lockspace is in recovery get added to the
 * request queue and processed when recovery is complete.  This happens when
 * the lockspace is suspended on some nodes before it is on others, or the
 * lockspace is enabled on some while still suspended on others.
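 *
 * Each saved message is copied verbatim, header included, into the
 * variable-length request[] buffer of an rq_entry sized at kmalloc() time,
 * so it can be handed back to dlm_receive_message() once recovery is done.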
 */

void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
{
	struct rq_entry *e;
	int length = hd->h_length;

	if (dlm_is_removed(ls, nodeid))
		return;

	e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
	if (!e) {
		log_print("dlm_add_requestqueue: out of memory\n");
		return;
	}

	e->nodeid = nodeid;
	memcpy(e->request, hd, length);

	down(&ls->ls_requestqueue_lock);
	list_add_tail(&e->list, &ls->ls_requestqueue);
	up(&ls->ls_requestqueue_lock);
}

int dlm_process_requestqueue(struct dlm_ls *ls)
{
	struct rq_entry *e;
	struct dlm_header *hd;
	int error = 0;

	down(&ls->ls_requestqueue_lock);

	for (;;) {
		if (list_empty(&ls->ls_requestqueue)) {
			up(&ls->ls_requestqueue_lock);
			error = 0;
			break;
		}
		e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
		up(&ls->ls_requestqueue_lock);

		hd = (struct dlm_header *) e->request;
		error = dlm_receive_message(hd, e->nodeid, TRUE);

		if (error == -EINTR) {
			/* entry is left on requestqueue */
			log_debug(ls, "process_requestqueue abort eintr");
			break;
		}

		down(&ls->ls_requestqueue_lock);
		list_del(&e->list);
		kfree(e);

		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "process_requestqueue abort running");
			up(&ls->ls_requestqueue_lock);
			error = -EINTR;
			break;
		}
		schedule();
	}

	return error;
}

/*
 * After recovery is done, locking is resumed and dlm_recoverd takes all the
 * saved requests and processes them as they would have been by dlm_recvd.  At
 * the same time, dlm_recvd will start receiving new requests from remote
 * nodes.  We want to delay dlm_recvd processing new requests until
 * dlm_recoverd has finished processing the old saved requests.
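 *
 * dlm_wait_requestqueue() below provides that delay: it repeatedly checks
 * the queue under ls_requestqueue_lock, calling schedule() between checks,
 * and returns once the queue is empty or locking has been stopped again for
 * another round of recovery.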
 */

void dlm_wait_requestqueue(struct dlm_ls *ls)
{
	for (;;) {
		down(&ls->ls_requestqueue_lock);
		if (list_empty(&ls->ls_requestqueue))
			break;
		if (dlm_locking_stopped(ls))
			break;
		up(&ls->ls_requestqueue_lock);
		schedule();
	}
	up(&ls->ls_requestqueue_lock);
}

static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
	uint32_t type = ms->m_type;

	if (dlm_is_removed(ls, nodeid))
		return 1;

	/* directory operations are always purged because the directory is
	   always rebuilt during recovery and the lookups resent */

	if (type == DLM_MSG_REMOVE ||
	    type == DLM_MSG_LOOKUP ||
	    type == DLM_MSG_LOOKUP_REPLY)
		return 1;

	if (!dlm_no_directory(ls))
		return 0;

	/* with no directory, the master is likely to change as a part of
	   recovery; requests to/from the defunct master need to be purged */

	switch (type) {
	case DLM_MSG_REQUEST:
	case DLM_MSG_CONVERT:
	case DLM_MSG_UNLOCK:
	case DLM_MSG_CANCEL:
		/* we're no longer the master of this resource, the sender
		   will resend to the new master (see waiter_needs_recovery) */

		if (dlm_hash2nodeid(ls, ms->m_hash) != dlm_our_nodeid())
			return 1;
		break;

	case DLM_MSG_REQUEST_REPLY:
	case DLM_MSG_CONVERT_REPLY:
	case DLM_MSG_UNLOCK_REPLY:
	case DLM_MSG_CANCEL_REPLY:
	case DLM_MSG_GRANT:
		/* this reply is from the former master of the resource,
		   we'll resend to the new master if needed */

		if (dlm_hash2nodeid(ls, ms->m_hash) != nodeid)
			return 1;
		break;
	}

	return 0;
}

void dlm_purge_requestqueue(struct dlm_ls *ls)
{
	struct dlm_message *ms;
	struct rq_entry *e, *safe;

	down(&ls->ls_requestqueue_lock);
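	/* drop any saved message that recovery has made stale, according to
	   the rules in purge_request() above */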
	list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
		ms = (struct dlm_message *) e->request;

		if (purge_request(ls, ms, e->nodeid)) {
			list_del(&e->list);
			kfree(e);
		}
	}
	up(&ls->ls_requestqueue_lock);
}
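
/*
 * How the pieces above fit together (per the comments earlier in this file;
 * the callers are elsewhere in the dlm): messages arriving while locking is
 * stopped are saved with dlm_add_requestqueue(); after recovery, dlm_recoverd
 * replays them through dlm_process_requestqueue(), which hands each saved
 * message back to dlm_receive_message(); dlm_wait_requestqueue() delays
 * dlm_recvd until that replay has finished; dlm_purge_requestqueue() discards
 * saved messages that recovery has made stale.
 */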