/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996, 1997, 1998
 *	Sleepycat Software.  All rights reserved.
 */
#include "config.h"

#ifndef lint
static const char sccsid[] = "@(#)mp_sync.c	10.31 (Sleepycat) 12/11/98";
#endif /* not lint */

#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>

#include <errno.h>
#include <stdlib.h>
#endif

#include "db_int.h"
#include "shqueue.h"
#include "db_shash.h"
#include "mp.h"
#include "common_ext.h"

static int __bhcmp __P((const void *, const void *));
static int __memp_fsync __P((DB_MPOOLFILE *));

/*
 * memp_sync --
 *	Mpool sync function.
 */
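/*
 * Illustrative usage (a sketch, not part of the original source; ckp_lsn
 * stands in for a caller's checkpoint LSN): callers are expected to retry
 * when DB_INCOMPLETE is returned, typically sleeping between attempts:
 *
 *	while ((ret = memp_sync(dbmp, &ckp_lsn)) == DB_INCOMPLETE)
 *		;
 */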
int
memp_sync(dbmp, lsnp)
	DB_MPOOL *dbmp;
	DB_LSN *lsnp;
{
	BH *bhp, **bharray;
	DB_ENV *dbenv;
	MPOOL *mp;
	MPOOLFILE *mfp;
	int ar_cnt, nalloc, next, maxpin, ret, wrote;

	MP_PANIC_CHECK(dbmp);

	dbenv = dbmp->dbenv;
	mp = dbmp->mp;

	if (dbenv->lg_info == NULL) {
		__db_err(dbenv, "memp_sync: requires logging");
		return (EINVAL);
	}

	/*
	 * We try and write the buffers in page order: it should reduce seeks
	 * by the underlying filesystem and possibly reduce the actual number
	 * of writes.  We don't want to hold the region lock while we write
	 * the buffers, so we only hold it locked while we create a list.
	 * Get a good-sized block of memory to hold buffer pointers; we don't
	 * want to run out.
	 */
	LOCKREGION(dbmp);
	nalloc = mp->stat.st_page_dirty + mp->stat.st_page_dirty / 2 + 10;
	UNLOCKREGION(dbmp);
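	/*
	 * (Illustrative arithmetic, not in the original: with 100 dirty
	 * pages, nalloc is 100 + 100 / 2 + 10 = 160 entries, leaving
	 * headroom for pages dirtied after the region lock is dropped.)
	 */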

	if ((ret = __os_malloc(nalloc * sizeof(BH *), NULL, &bharray)) != 0)
		return (ret);

	LOCKREGION(dbmp);

	/*
	 * If the application is asking about a previous call to memp_sync(),
	 * and we haven't found any buffers that the application holding the
	 * pin couldn't write, return yes or no based on the current count.
	 * Note that if the application is asking about an LSN *smaller* than
	 * one we've already handled or are currently handling, then we return
	 * a result based on the count for the larger LSN.
	 */
	if (!F_ISSET(mp, MP_LSN_RETRY) && log_compare(lsnp, &mp->lsn) <= 0) {
		if (mp->lsn_cnt == 0) {
			*lsnp = mp->lsn;
			ret = 0;
		} else
			ret = DB_INCOMPLETE;
		goto done;
	}

	/* Else, it's a new checkpoint. */
	F_CLR(mp, MP_LSN_RETRY);

	/*
	 * Save the LSN.  We know that it's a new LSN or larger than the one
	 * for which we were already doing a checkpoint.  (BTW, I don't expect
	 * to see multiple LSNs from the same or multiple processes, but You
	 * Just Never Know.  Responding as if they all called with the largest
	 * of the LSNs specified makes everything work.)
	 *
	 * We don't currently use the LSN we save.  We could potentially save
	 * the last-written LSN in each buffer header and use it to determine
	 * what buffers need to be written.  The problem with this is that
	 * it's sizeof(LSN) more bytes of buffer header.  We currently write
	 * all the dirty buffers instead.
	 *
	 * Walk the list of shared memory segments clearing the count of
	 * buffers waiting to be written.
	 */
	mp->lsn = *lsnp;
	mp->lsn_cnt = 0;
	for (mfp = SH_TAILQ_FIRST(&dbmp->mp->mpfq, __mpoolfile);
	    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
		mfp->lsn_cnt = 0;

	/*
	 * Walk the list of buffers and mark all dirty buffers to be written
	 * and all pinned buffers to be potentially written (we can't know if
	 * we'll need to write them until the holding process returns them to
	 * the cache).  We do this in one pass while holding the region locked
	 * so that processes can't make new buffers dirty, causing us to never
	 * finish.  Since the application may have restarted the sync, clear
	 * any BH_WRITE flags that appear to be left over from previous calls.
	 *
	 * We don't want to pin down the entire buffer cache, otherwise we'll
	 * starve threads needing new pages.  Don't pin down more than 80% of
	 * the cache.
	 *
	 * Keep a count of the total number of buffers we need to write in
	 * MPOOL->lsn_cnt, and for each file, in MPOOLFILE->lsn_cnt.
	 */
	ar_cnt = 0;
	maxpin = ((mp->stat.st_page_dirty + mp->stat.st_page_clean) * 8) / 10;
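	/*
	 * (Illustrative arithmetic, not in the original: with 900 clean and
	 * 100 dirty pages, maxpin is (1000 * 8) / 10 = 800, so this sync
	 * never pins more than 80% of the cache at once.)
	 */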
	for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh))
		if (F_ISSET(bhp, BH_DIRTY) || bhp->ref != 0) {
			F_SET(bhp, BH_WRITE);

			++mp->lsn_cnt;

			mfp = R_ADDR(dbmp, bhp->mf_offset);
			++mfp->lsn_cnt;

			/*
			 * If the buffer isn't in use, we should be able to
			 * write it immediately, so increment the reference
			 * count to lock it and its contents down, and then
			 * save a reference to it.
			 *
			 * If we've run out of space to store buffer
			 * references, we're screwed.  We don't want to
			 * realloc the array while holding a region lock, so
			 * we set the flag to force the checkpoint to be done
			 * again, from scratch, later.
			 *
			 * If we've pinned down too much of the cache, stop
			 * and set a flag to force the checkpoint to be tried
			 * again later.
			 */
			if (bhp->ref == 0) {
				++bhp->ref;
				bharray[ar_cnt] = bhp;
				if (++ar_cnt >= nalloc || ar_cnt >= maxpin) {
					F_SET(mp, MP_LSN_RETRY);
					break;
				}
			}
		} else
			if (F_ISSET(bhp, BH_WRITE))
				F_CLR(bhp, BH_WRITE);

	/* If there are no buffers we can write immediately, we're done. */
	if (ar_cnt == 0) {
		ret = mp->lsn_cnt ? DB_INCOMPLETE : 0;
		goto done;
	}

	UNLOCKREGION(dbmp);

	/*
	 * Sort the buffers we're going to write: __bhcmp() orders them by
	 * file and then by page number, so each file's pages are written
	 * in order.
	 */
	qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);

	LOCKREGION(dbmp);

	/* Walk the array, writing buffers. */
	for (next = 0; next < ar_cnt; ++next) {
		/*
		 * It's possible for a thread to have gotten the buffer since
		 * we listed it for writing.  If the reference count is still
		 * 1, we're the only ones using the buffer, so go ahead and
		 * write.  If it's >1, skip the buffer and assume that it
		 * will be written when it's returned to the cache.
		 */
		if (bharray[next]->ref > 1) {
			--bharray[next]->ref;
			continue;
		}

		/* Write the buffer. */
		mfp = R_ADDR(dbmp, bharray[next]->mf_offset);
		ret = __memp_bhwrite(dbmp, mfp, bharray[next], NULL, &wrote);

		/* Release the buffer. */
		--bharray[next]->ref;

		/* If there's an error, release the rest of the buffers. */
		if (ret != 0 || !wrote) {
			/*
			 * Any process syncing the shared memory buffer pool
			 * had better be able to write to any underlying file.
			 * Be understanding, but firm, on this point.
			 */
			if (ret == 0) {
				__db_err(dbenv, "%s: unable to flush page: %lu",
				    __memp_fns(dbmp, mfp),
				    (u_long)bharray[next]->pgno);
				ret = EPERM;
			}

			while (++next < ar_cnt)
				--bharray[next]->ref;
			goto err;
		}
	}
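	/*
	 * Report DB_INCOMPLETE if buffers remain to be written, or if the
	 * scan above was cut short and the checkpoint must be retried.
	 */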
	ret = mp->lsn_cnt != 0 ||
	    F_ISSET(mp, MP_LSN_RETRY) ? DB_INCOMPLETE : 0;

done:	if (0) {
err:		/*
		 * On error, clear:
		 *	MPOOL->lsn_cnt (the total sync count)
		 *	MPOOLFILE->lsn_cnt (the per-file sync count)
		 *	BH_WRITE flag (the scheduled-for-writing flag)
		 */
		mp->lsn_cnt = 0;
		for (mfp = SH_TAILQ_FIRST(&dbmp->mp->mpfq, __mpoolfile);
		    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
			mfp->lsn_cnt = 0;
		for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
		    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh))
			F_CLR(bhp, BH_WRITE);
	}
	UNLOCKREGION(dbmp);
	__os_free(bharray, nalloc * sizeof(BH *));
	return (ret);
}

/*
 * memp_fsync --
 *	Mpool file sync function.
 */
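/*
 * Illustrative usage (a sketch, not part of the original source): flush a
 * single file's dirty pages, retrying (typically after a short delay) as
 * long as other threads or processes hold pages pinned:
 *
 *	while ((ret = memp_fsync(dbmfp)) == DB_INCOMPLETE)
 *		;
 */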
int
memp_fsync(dbmfp)
	DB_MPOOLFILE *dbmfp;
{
	DB_MPOOL *dbmp;
	int is_tmp;

	dbmp = dbmfp->dbmp;

	MP_PANIC_CHECK(dbmp);

	/*
	 * If this handle doesn't have a file descriptor that's open for
	 * writing, or if the file is a temporary, there's no reason to
	 * proceed further.
	 */
	if (F_ISSET(dbmfp, MP_READONLY))
		return (0);

	LOCKREGION(dbmp);
	is_tmp = F_ISSET(dbmfp->mfp, MP_TEMP);
	UNLOCKREGION(dbmp);
	if (is_tmp)
		return (0);

	return (__memp_fsync(dbmfp));
}

/*
 * __mp_xxx_fd --
 *	Return a file descriptor for DB 1.85 compatibility locking.
 *
 * PUBLIC: int __mp_xxx_fd __P((DB_MPOOLFILE *, int *));
 */
int
__mp_xxx_fd(dbmfp, fdp)
	DB_MPOOLFILE *dbmfp;
	int *fdp;
{
	int ret;

	/*
	 * This is a truly spectacular layering violation, intended ONLY to
	 * support compatibility for the DB 1.85 DB->fd call.
	 *
	 * Sync the database file to disk, creating the file as necessary.
	 *
	 * We skip the MP_READONLY and MP_TEMP tests done by memp_fsync(3).
	 * The MP_READONLY test isn't interesting because we will either
	 * already have a file descriptor (we opened the database file for
	 * reading) or we aren't readonly (we created the database which
	 * requires write privileges).  The MP_TEMP test isn't interesting
	 * because we want to write to the backing file regardless so that
	 * we get a file descriptor to return.
	 */
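	/*
	 * Only sync if we don't yet have a file descriptor: the sync is
	 * what forces the backing file to be created and opened, which is
	 * all this call actually needs.
	 */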
	ret = dbmfp->fd == -1 ? __memp_fsync(dbmfp) : 0;

	return ((*fdp = dbmfp->fd) == -1 ? ENOENT : ret);
}

/*
 * __memp_fsync --
 *	Mpool file internal sync function.
 */
static int
__memp_fsync(dbmfp)
	DB_MPOOLFILE *dbmfp;
{
	BH *bhp, **bharray;
	DB_MPOOL *dbmp;
	MPOOL *mp;
	size_t mf_offset;
	int ar_cnt, incomplete, nalloc, next, ret, wrote;

	ret = 0;
	dbmp = dbmfp->dbmp;
	mp = dbmp->mp;
	mf_offset = R_OFFSET(dbmp, dbmfp->mfp);

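	/*
	 * Note (not in the original source): buffer headers record the
	 * shared-region offset of their MPOOLFILE, so the mf_offset
	 * comparison below is how buffers are matched to this file.
	 */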
	/*
	 * We try and write the buffers in page order: it should reduce seeks
	 * by the underlying filesystem and possibly reduce the actual number
	 * of writes.  We don't want to hold the region lock while we write
	 * the buffers, so we only hold it locked while we create a list.
	 * Get a good-sized block of memory to hold buffer pointers; we don't
	 * want to run out.
	 */
	LOCKREGION(dbmp);
	nalloc = mp->stat.st_page_dirty + mp->stat.st_page_dirty / 2 + 10;
	UNLOCKREGION(dbmp);

	if ((ret = __os_malloc(nalloc * sizeof(BH *), NULL, &bharray)) != 0)
		return (ret);

	LOCKREGION(dbmp);

	/*
	 * Walk the LRU list of buffer headers, and get a list of buffers to
	 * write for this MPOOLFILE.
	 */
	ar_cnt = incomplete = 0;
	for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
		if (!F_ISSET(bhp, BH_DIRTY) || bhp->mf_offset != mf_offset)
			continue;
		if (bhp->ref != 0 || F_ISSET(bhp, BH_LOCKED)) {
			incomplete = 1;
			continue;
		}

		++bhp->ref;
		bharray[ar_cnt] = bhp;

		/*
		 * If we've run out of space to store buffer references,
		 * we're screwed, as we don't want to realloc the array while
		 * holding a region lock.  Set the incomplete flag -- the
		 * only way we can get here is if the file is active in the
		 * buffer cache, which is the same thing as finding pinned
		 * buffers.
		 */
		if (++ar_cnt >= nalloc) {
			incomplete = 1;
			break;
		}
	}

	UNLOCKREGION(dbmp);

	/* Sort the buffers we're going to write into page order. */
	if (ar_cnt != 0)
		qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);

	LOCKREGION(dbmp);

	/* Walk the array, writing buffers. */
	for (next = 0; next < ar_cnt; ++next) {
		/*
		 * It's possible for a thread to have gotten the buffer since
		 * we listed it for writing.  If the reference count is still
		 * 1, we're the only ones using the buffer, so go ahead and
		 * write.  If it's >1, skip the buffer.
		 */
		if (bharray[next]->ref > 1) {
			incomplete = 1;

			--bharray[next]->ref;
			continue;
		}

		/* Write the buffer. */
		ret = __memp_pgwrite(dbmfp, bharray[next], NULL, &wrote);

		/* Release the buffer. */
		--bharray[next]->ref;

		/* If there's an error, release the rest of the buffers. */
		if (ret != 0) {
			while (++next < ar_cnt)
				--bharray[next]->ref;
			goto err;
		}

		/*
		 * If we didn't write the buffer for some reason, don't
		 * return success.
		 */
		if (!wrote)
			incomplete = 1;
	}

err:	UNLOCKREGION(dbmp);

	__os_free(bharray, nalloc * sizeof(BH *));

	/*
	 * Sync the underlying file as the last thing we do, so that the OS
	 * has maximal opportunity to flush buffers before we request it.
	 *
	 * XXX:
	 * Don't lock the region around the sync: fsync(2) has no atomicity
	 * issues.
	 */
	if (ret == 0)
		return (incomplete ? DB_INCOMPLETE : __os_fsync(dbmfp->fd));
	return (ret);
}

/*
 * memp_trickle --
 *	Keep a specified percentage of the buffers clean.
 */
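/*
 * Illustrative usage (a sketch, not part of the original source;
 * sleep_awhile() is a hypothetical delay function): a background thread
 * might keep at least 20% of the cache clean:
 *
 *	int nwrote;
 *	for (;;) {
 *		if (memp_trickle(dbmp, 20, &nwrote) != 0)
 *			break;
 *		sleep_awhile();
 *	}
 */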
int
memp_trickle(dbmp, pct, nwrotep)
	DB_MPOOL *dbmp;
	int pct, *nwrotep;
{
	BH *bhp;
	MPOOL *mp;
	MPOOLFILE *mfp;
	db_pgno_t pgno;
	u_long total;
	int ret, wrote;

	MP_PANIC_CHECK(dbmp);

	mp = dbmp->mp;
	if (nwrotep != NULL)
		*nwrotep = 0;

	if (pct < 1 || pct > 100)
		return (EINVAL);

	LOCKREGION(dbmp);

	/*
	 * If there are sufficient clean buffers, no buffers at all, or no
	 * dirty buffers, we're done.
	 *
	 * XXX
	 * Using st_page_clean and st_page_dirty is our only choice at the
	 * moment, but it's not as correct as we might like in the presence
	 * of pools with more than one buffer size, as a free 512-byte buffer
	 * isn't the same as a free 8K buffer.
	 */
loop:	total = mp->stat.st_page_clean + mp->stat.st_page_dirty;
	if (total == 0 || mp->stat.st_page_dirty == 0 ||
	    (mp->stat.st_page_clean * 100) / total >= (u_long)pct) {
		UNLOCKREGION(dbmp);
		return (0);
	}

	/* Loop until we write a buffer. */
	for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
		if (bhp->ref != 0 ||
		    !F_ISSET(bhp, BH_DIRTY) || F_ISSET(bhp, BH_LOCKED))
			continue;

		mfp = R_ADDR(dbmp, bhp->mf_offset);

		/*
		 * We can't write to temporary files -- see the comment in
		 * mp_bh.c:__memp_bhwrite().
		 */
		if (F_ISSET(mfp, MP_TEMP))
			continue;

		pgno = bhp->pgno;
		if ((ret = __memp_bhwrite(dbmp, mfp, bhp, NULL, &wrote)) != 0)
			goto err;

		/*
		 * Any process syncing the shared memory buffer pool had
		 * better be able to write to any underlying file.  Be
		 * understanding, but firm, on this point.
		 */
		if (!wrote) {
			__db_err(dbmp->dbenv, "%s: unable to flush page: %lu",
			    __memp_fns(dbmp, mfp), (u_long)pgno);
			ret = EPERM;
			goto err;
		}

		++mp->stat.st_page_trickle;
		if (nwrotep != NULL)
			++*nwrotep;
		goto loop;
	}

	/* No more buffers to write. */
	ret = 0;

err:	UNLOCKREGION(dbmp);
	return (ret);
}

static int
__bhcmp(p1, p2)
	const void *p1, *p2;
{
	BH *bhp1, *bhp2;

	bhp1 = *(BH * const *)p1;
	bhp2 = *(BH * const *)p2;

	/* Sort by file (shared memory pool offset). */
	if (bhp1->mf_offset < bhp2->mf_offset)
		return (-1);
	if (bhp1->mf_offset > bhp2->mf_offset)
		return (1);

	/*
	 * !!!
	 * Defend against badly written quicksort code calling the comparison
	 * function with two identical pointers (e.g., WATCOM C++ (Power++)).
	 */
	if (bhp1->pgno < bhp2->pgno)
		return (-1);
	if (bhp1->pgno > bhp2->pgno)
		return (1);
	return (0);
}
