/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_log.h"

static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_dq_logitem, qli_item);
}

/*
 * Returns the number of iovecs and bytes needed to log the given dquot item.
 */
STATIC void
xfs_qm_dquot_logitem_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 2;
	*nbytes += sizeof(struct xfs_dq_logformat) +
		   sizeof(struct xfs_disk_dquot);
}

/*
 * Fills in the vector of log iovecs for the given dquot log item.
 */
STATIC void
xfs_qm_dquot_logitem_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_dq_logformat	*qlf;

	qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QFORMAT);
	qlf->qlf_type = XFS_LI_DQUOT;
	qlf->qlf_size = 2;
	qlf->qlf_id = be32_to_cpu(qlip->qli_dquot->q_core.d_id);
	qlf->qlf_blkno = qlip->qli_dquot->q_blkno;
	qlf->qlf_len = 1;
	qlf->qlf_boffset = qlip->qli_dquot->q_bufoffset;
	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_dq_logformat));

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_DQUOT,
			&qlip->qli_dquot->q_core,
			sizeof(struct xfs_disk_dquot));
}

/*
 * Increment the pin count of the given dquot.
 */
STATIC void
xfs_qm_dquot_logitem_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	atomic_inc(&dqp->q_pincount);
}

/*
 * Decrement the pin count of the given dquot, and wake up
 * anyone in xfs_qm_dqunpin_wait() if the count goes to 0. The
 * dquot must have been previously pinned with a call to
 * xfs_qm_dquot_logitem_pin().
 */
STATIC void
xfs_qm_dquot_logitem_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(atomic_read(&dqp->q_pincount) > 0);
	if (atomic_dec_and_test(&dqp->q_pincount))
		wake_up(&dqp->q_pinwait);
}

STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	/*
	 * We always re-log the entire dquot when it becomes dirty,
	 * so, the latest copy _is_ the only one that matters.
	 */
	return lsn;
}

/*
 * This is called to wait for the given dquot to be unpinned.
 * Most of these pin/unpin routines are plagiarized from inode code.
 */
void
xfs_qm_dqunpin_wait(
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (atomic_read(&dqp->q_pincount) == 0)
		return;

	/*
	 * Give the log a push so we don't wait here too long.
	 */
	xfs_log_force(dqp->q_mount, 0);
	wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
}

/*
 * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
 * have been failed during writeback.
 *
 * This informs the AIL that the dquot is already flush locked on the next
 * push, and acquires a hold on the buffer to ensure that it isn't reclaimed
 * before dirty data makes it to disk.
 */
STATIC void
xfs_dquot_item_error(
	struct xfs_log_item	*lip,
	struct xfs_buf		*bp)
{
	ASSERT(!completion_done(&DQUOT_ITEM(lip)->qli_dquot->q_flush));
	xfs_set_li_failed(lip, bp);
}

/*
 * Called when the AIL pushes this log item. Resubmit the backing buffer if a
 * previous writeback failed, otherwise flush the dquot and queue its buffer
 * for delayed write. Returns an XFS_ITEM_* code telling the AIL how to
 * proceed with the item.
 */
STATIC uint
xfs_qm_dquot_logitem_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
		__releases(&lip->li_ailp->ail_lock)
		__acquires(&lip->li_ailp->ail_lock)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
	struct xfs_buf		*bp = lip->li_buf;
	uint			rval = XFS_ITEM_SUCCESS;
	int			error;

	if (atomic_read(&dqp->q_pincount) > 0)
		return XFS_ITEM_PINNED;

	/*
	 * The buffer containing this item failed to be written back
	 * previously. Resubmit the buffer for IO.
	 */
	if (test_bit(XFS_LI_FAILED, &lip->li_flags)) {
		if (!xfs_buf_trylock(bp))
			return XFS_ITEM_LOCKED;

		if (!xfs_buf_resubmit_failed_buffers(bp, buffer_list))
			rval = XFS_ITEM_FLUSHING;

		xfs_buf_unlock(bp);
		return rval;
	}

	if (!xfs_dqlock_nowait(dqp))
		return XFS_ITEM_LOCKED;

	/*
	 * Re-check the pincount now that we stabilized the value by
	 * taking the quota lock.
	 */
	if (atomic_read(&dqp->q_pincount) > 0) {
		rval = XFS_ITEM_PINNED;
		goto out_unlock;
	}

	/*
	 * Someone else is already flushing the dquot. Nothing we can do
	 * here but wait for the flush to finish and remove the item from
	 * the AIL.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		rval = XFS_ITEM_FLUSHING;
		goto out_unlock;
	}

	spin_unlock(&lip->li_ailp->ail_lock);

	error = xfs_qm_dqflush(dqp, &bp);
	if (!error) {
		if (!xfs_buf_delwri_queue(bp, buffer_list))
			rval = XFS_ITEM_FLUSHING;
		xfs_buf_relse(bp);
	}

	spin_lock(&lip->li_ailp->ail_lock);
out_unlock:
	xfs_dqunlock(dqp);
	return rval;
}

/*
 * Unlock the dquot associated with the log item.
 * Clear the fields of the dquot and dquot log item that
 * are specific to the current transaction. If the
 * hold flag is set, do not unlock the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * Clear the transaction pointer in the dquot.
	 */
	dqp->q_transp = NULL;

	/*
	 * dquots are never 'held' from getting unlocked at the end of
	 * a transaction. Their locking and unlocking is hidden inside the
	 * transaction layer, within trans_commit. Hence, no LI_HOLD flag
	 * for the logitem.
	 */
	xfs_dqunlock(dqp);
}

/*
 * This needs to stamp an lsn into the dquot, I think.
 * RPCs that look at user dquots would then have to
 * push on the dependency recorded in the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
}

/*
 * This is the ops vector for dquots.
 */
static const struct xfs_item_ops xfs_dquot_item_ops = {
	.iop_size	= xfs_qm_dquot_logitem_size,
	.iop_format	= xfs_qm_dquot_logitem_format,
	.iop_pin	= xfs_qm_dquot_logitem_pin,
	.iop_unpin	= xfs_qm_dquot_logitem_unpin,
	.iop_unlock	= xfs_qm_dquot_logitem_unlock,
	.iop_committed	= xfs_qm_dquot_logitem_committed,
	.iop_push	= xfs_qm_dquot_logitem_push,
	.iop_committing	= xfs_qm_dquot_logitem_committing,
	.iop_error	= xfs_dquot_item_error
};

/*
 * Initialize the dquot log item for a newly allocated dquot.
 * The dquot isn't locked at this point, but it isn't on any of the lists
 * either, so we don't care.
 */
void
xfs_qm_dquot_logitem_init(
	struct xfs_dquot	*dqp)
{
	struct xfs_dq_logitem	*lp = &dqp->q_logitem;

	xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
					&xfs_dquot_item_ops);
	lp->qli_dquot = dqp;
}

/*------------------ QUOTAOFF LOG ITEMS -------------------*/

static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_qoff_logitem, qql_item);
}


/*
 * This returns the number of iovecs needed to log the given quotaoff item.
 * We only need 1 iovec for a quotaoff item. It just logs the
 * quotaoff_log_format structure.
 */
STATIC void
xfs_qm_qoff_logitem_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_qoff_logitem);
}

/*
 * Fill in the single log iovec for the given quotaoff log item.
 */
STATIC void
xfs_qm_qoff_logitem_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_qoff_logitem	*qflip = QOFF_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_qoff_logformat *qlf;

	qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QUOTAOFF);
	qlf->qf_type = XFS_LI_QUOTAOFF;
	qlf->qf_size = 1;
	qlf->qf_flags = qflip->qql_flags;
	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_qoff_logitem));
}

/*
 * Pinning has no meaning for a quotaoff item, so just return.
 */
STATIC void
xfs_qm_qoff_logitem_pin(
	struct xfs_log_item	*lip)
{
}

/*
 * Since pinning has no meaning for a quotaoff item, unpinning does
 * not either.
 */
STATIC void
xfs_qm_qoff_logitem_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
}

/*
 * There isn't much you can do to push a quotaoff item. It is simply
 * stuck waiting for the log to be flushed to disk.
 */
STATIC uint
xfs_qm_qoff_logitem_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	return XFS_ITEM_LOCKED;
}

/*
 * Quotaoff items have no locking or pushing, so return failure
 * so that the caller doesn't bother with us.
 */
STATIC void
xfs_qm_qoff_logitem_unlock(
	struct xfs_log_item	*lip)
{
}

/*
 * The quotaoff-start-item is logged only once and cannot be moved in the log,
 * so simply return the lsn at which it's been logged.
 */
STATIC xfs_lsn_t
xfs_qm_qoff_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	return lsn;
}

STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_qoff_logitem	*qfe = QOFF_ITEM(lip);
	struct xfs_qoff_logitem	*qfs = qfe->qql_start_lip;
	struct xfs_ail		*ailp = qfs->qql_item.li_ailp;

	/*
	 * Delete the qoff-start logitem from the AIL.
	 * xfs_trans_ail_delete() drops the AIL lock.
	 */
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_delete(ailp, &qfs->qql_item, SHUTDOWN_LOG_IO_ERROR);

	kmem_free(qfs->qql_item.li_lv_shadow);
	kmem_free(lip->li_lv_shadow);
	kmem_free(qfs);
	kmem_free(qfe);
	return (xfs_lsn_t)-1;
}

/*
 * XXX rcc - don't know quite what to do with this. I think we can
 * just ignore it. The only time that isn't the case is if we allow
 * the client to somehow see that quotas have been turned off, in which
 * case we can't allow that to get back until the quotaoff hits the disk.
 * So how would that happen? Also, do we need different routines for
 * quotaoff start and quotaoff end? I suspect the answer is yes but
 * to be sure, I need to look at the recovery code and see how quota off
 * recovery is handled (do we roll forward or back or do something else).
 * If we roll forwards or backwards, then we need two separate routines,
 * one that does nothing and one that stamps in the lsn that matters
 * (truly makes the quotaoff irrevocable). If we do something else,
 * then maybe we don't need two.
 */
STATIC void
xfs_qm_qoff_logitem_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

/*
 * This is the ops vector for quotaoff-end log items.
 */
static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
	.iop_size	= xfs_qm_qoff_logitem_size,
	.iop_format	= xfs_qm_qoff_logitem_format,
	.iop_pin	= xfs_qm_qoff_logitem_pin,
	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
	.iop_committed	= xfs_qm_qoffend_logitem_committed,
	.iop_push	= xfs_qm_qoff_logitem_push,
	.iop_committing	= xfs_qm_qoff_logitem_committing
};

/*
 * This is the ops vector shared by all quotaoff-start log items.
 */
static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
	.iop_size	= xfs_qm_qoff_logitem_size,
	.iop_format	= xfs_qm_qoff_logitem_format,
	.iop_pin	= xfs_qm_qoff_logitem_pin,
	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
	.iop_committed	= xfs_qm_qoff_logitem_committed,
	.iop_push	= xfs_qm_qoff_logitem_push,
	.iop_committing	= xfs_qm_qoff_logitem_committing
};

/*
 * Allocate and initialize a quotaoff item of the correct quota type(s).
 */
struct xfs_qoff_logitem *
xfs_qm_qoff_logitem_init(
	struct xfs_mount	*mp,
	struct xfs_qoff_logitem	*start,
	uint			flags)
{
	struct xfs_qoff_logitem	*qf;

	qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);

	xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
			&xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
	qf->qql_item.li_mountp = mp;
	qf->qql_start_lip = start;
	qf->qql_flags = flags;
	return qf;
}