/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1994-2000 by Sun Microsystems, Inc.
 * All rights reserved.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>		/* for bzero */
#include <sys/machlock.h>
#include <sys/spl.h>
#include <sys/promif.h>
#include <sys/debug.h>

#include "tnf_buf.h"

/*
 * Defines
 */

#define	TNFW_B_ALLOC_LO		0x1
#define	TNFW_B_MAXALLOCTRY	32

#define	TNF_MAXALLOC	(TNF_BLOCK_SIZE - sizeof (tnf_block_header_t))

/*
 * Globals
 */

TNFW_B_STATE tnfw_b_state = TNFW_B_NOBUFFER | TNFW_B_STOPPED;

/*
 * Locals
 */

static int spinlock_spl;

/*
 * Declarations
 */

static tnf_block_header_t *tnfw_b_alloc_block(tnf_buf_file_header_t *,
    enum tnf_alloc_mode);
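
/*
 * Block allocation overview (a summary of the code below; the
 * commit-side use of the B-lock lives outside this file and is
 * only assumed here):
 *
 *   - The buffer is divided into TNF_BLOCK_SIZE blocks.  Blocks
 *     below TNFW_B_DATA_BLOCK_BEGIN are reserved for the directory,
 *     forwarding cells and tag blocks; the rest hold trace data.
 *   - A writer owns a block while it holds the block's A-lock.
 *     The B-lock must also be free before a block may be reclaimed;
 *     it is presumably taken elsewhere while a block awaits commit.
 *   - Each block records the generation in which it was last
 *     allocated, and the file header keeps a {generation, block}
 *     allocation hint.  The data area is recycled circularly: a
 *     block whose generation is older than the current one may be
 *     reused.
 */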

/*
 * (Private) Allocate a new block. Return NULL on failure and mark
 * tracing as broken. 'istag' is non-zero if the block is to be
 * non-reclaimable. All blocks are returned A-locked.
 */

static tnf_block_header_t *
tnfw_b_alloc_block(tnf_buf_file_header_t *fh, enum tnf_alloc_mode istag)
{
	tnf_block_header_t *block;
	ulong_t bcount;
	ulong_t tmp_bn, bn, new_bn;
	ulong_t tmp_gen, gen, new_gen;
	ulong_t next;
	int i;
	lock_t *lp;
	ushort_t spl;

	if (tnfw_b_state != TNFW_B_RUNNING)
		return (NULL);

	lp = &fh->lock;

	/*
	 * Check reserved area first for tag block allocations
	 * Tag allocations are rare, so we move the code out of line
	 */
	if (istag)
		goto try_reserved;

try_loop:
	/*
	 * Search for a block, using hint as starting point.
	 */

	bcount = fh->com.block_count;	/* total block count */

	gen = fh->next_alloc.gen;
	bn = fh->next_alloc.block[gen & TNFW_B_ALLOC_LO];

	for (i = 0; i < TNFW_B_MAXALLOCTRY; i++) {

		/*
		 * Calculate next (not this) block to look for.
		 * Needed for updating the hint.
		 */
		if ((new_bn = bn + 1) >= bcount) {
			new_bn = TNFW_B_DATA_BLOCK_BEGIN >> TNF_BLOCK_SHIFT;
			new_gen = gen + 1;
		} else
			new_gen = gen;

		/*
		 * Try to reserve candidate block
		 */
		/* LINTED pointer cast may result in improper alignment */
		block = (tnf_block_header_t *)
		    ((char *)fh + (bn << TNF_BLOCK_SHIFT));
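
		/*
		 * The candidate is reclaimable only if we win its
		 * A-lock (no writer owns it), its generation is older
		 * than the current one, and its B-lock is also free.
		 * Otherwise drop the A-lock and keep scanning.
		 */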
		if (lock_try(&block->A_lock)) {
			if (block->generation < gen &&
			    lock_try(&block->B_lock))
				goto update_hint;
			else
				lock_clear(&block->A_lock);
		}

		/* Reload hint values */
		gen = fh->next_alloc.gen;
		bn = fh->next_alloc.block[gen & TNFW_B_ALLOC_LO];

		/* adjust if we know a little better than the hint */
		if ((new_bn > bn && new_gen == gen) || new_gen > gen) {
			gen = new_gen;
			bn = new_bn;
		}
	}

	goto loop_fail;

update_hint:
	/*
	 * Re-read the hint and update it only if we'll be increasing it.
	 */
	lock_set_spl(lp, spinlock_spl, &spl);
	tmp_gen = fh->next_alloc.gen;
	tmp_bn = fh->next_alloc.block[tmp_gen & TNFW_B_ALLOC_LO];

	if ((new_gen == tmp_gen && new_bn > tmp_bn) || new_gen > tmp_gen) {
		/*
		 * Order is important here!  It is the write to
		 * next_alloc.gen that atomically records the new
		 * value.
		 */
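		/*
		 * (Readers load next_alloc.gen first and then
		 * next_alloc.block[gen & TNFW_B_ALLOC_LO]; filling in
		 * the slot for new_gen before publishing new_gen thus
		 * keeps the {gen, block} pair consistent for readers,
		 * which appears to be why two block slots are kept.)
		 */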
		fh->next_alloc.block[new_gen & TNFW_B_ALLOC_LO] = new_bn;
		fh->next_alloc.gen = new_gen;
	}
	lock_clear_splx(lp, spl);

got_block:
	/*
	 * Initialize and return the block
	 */
	/* ASSERT(block->tag == TNF_BLOCK_HEADER_TAG); */
	block->bytes_valid = sizeof (tnf_block_header_t);
	block->next_block = NULL;
	/* LINTED assignment of 64-bit integer to 32-bit integer */
	block->generation = istag ? TNF_TAG_GENERATION_NUM : gen;
	/* ASSERT(LOCK_HELD(&block->A_lock)); */
	lock_clear(&block->B_lock);
	return (block);

try_reserved:
	/*
	 * Look for a free tag block in reserved area
	 */
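	/*
	 * Tag blocks are carved sequentially from the reserved area
	 * between the directory and TNFW_B_DATA_BLOCK_BEGIN; a
	 * generation of zero means a block has never been claimed.
	 * If the reserved area is exhausted we fall back to the
	 * normal allocation loop; the block allocated there is still
	 * stamped TNF_TAG_GENERATION_NUM in got_block, consistent
	 * with the "non-reclaimable" note above.
	 */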
	next = fh->next_tag_alloc;
	while (next < (TNFW_B_DATA_BLOCK_BEGIN >> TNF_BLOCK_SHIFT)) {
		/* LINTED pointer cast may result in improper alignment */
		block = (tnf_block_header_t *)
		    ((char *)fh + (next << TNF_BLOCK_SHIFT));
		next++;
		/*
		 * See if block is unclaimed.
		 * Don't bother clearing the A-lock if the
		 * block was claimed and released, since it
		 * will never be reallocated anyway.
		 */
		if (lock_try(&block->A_lock) &&
		    block->generation == 0) {
			lock_set_spl(lp, spinlock_spl, &spl);
			if (next > fh->next_tag_alloc)
				fh->next_tag_alloc = next;
			lock_clear_splx(lp, spl);
			goto got_block;
		}
	}
	goto try_loop;

loop_fail:
	/*
	 * Only get here if we failed the for loop
	 */
	ASSERT(i == TNFW_B_MAXALLOCTRY);
	tnfw_b_state = TNFW_B_BROKEN;
#ifdef DEBUG
	prom_printf("kernel probes: alloc_block failed\n");
#endif
	return (NULL);

}

/*
 * Allocate size bytes from the trace buffer. Return NULL on failure,
 * and mark tracing as broken. We're guaranteed that the buffer will
 * not be deallocated while we're in this routine.
 * Allocation requests must be word-sized and are word-aligned.
 */

void *
tnfw_b_alloc(TNFW_B_WCB *wcb, size_t size, enum tnf_alloc_mode istag)
{
	TNFW_B_POS *pos;
	ushort_t offset;
	void *destp;
	tnf_block_header_t *block, *new_block;

	pos = &wcb->tnfw_w_pos;		/* common case */
	if (istag)
		pos = &wcb->tnfw_w_tag_pos;
	block = pos->tnfw_w_block;
	offset = pos->tnfw_w_write_off;
	/* Round size up to a multiple of 8. */
	size = (size + 7) & ~7;

	if (block == NULL || offset + size > TNF_BLOCK_SIZE) {

		/* Get a new block */
		/* LINTED pointer cast may result in improper alignment */
		new_block = tnfw_b_alloc_block(TNF_FILE_HEADER(), istag);
		if (new_block == NULL)
			/* tracing has been marked as broken at this point */
			return (NULL);

		/* ASSERT(size <= TNF_MAXALLOC); */

		/*
		 * If the old block is clean (i.e., we're in a new
		 * transaction), just release it. Else, pad it out
		 * and attach it to the list of uncommitted blocks.
		 */
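		/*
		 * (The tail of a dirty block is padded with TNF_NULL
		 * so that, presumably, a reader scanning the block
		 * finds only null words past the last valid record
		 * rather than stale data.)
		 */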
		if (block != NULL) {
			if (block->bytes_valid == offset &&
			    !pos->tnfw_w_dirty) {
				/* block is clean: release it */
				lock_clear(&block->A_lock);
			} else {
				/* block is dirty */
				ulong_t *p, *q;

				/* LINTED pointer cast */
				p = (ulong_t *)((char *)block + offset);
				/* LINTED pointer cast */
				q = (ulong_t *)((char *)block + TNF_BLOCK_SIZE);
				while (p < q)
					*p++ = TNF_NULL;

				/* append block to release list */
				new_block->next_block = block;

				/* we have at least one dirty block */
				pos->tnfw_w_dirty = 1;
			}
		}

		/* make new_block the current block */
		pos->tnfw_w_block = block = new_block;
		/* write_off is updated below */
		offset = sizeof (tnf_block_header_t);
		/* ASSERT(new_block->bytes_valid == offset); */
	}

	destp = (char *)block + offset;
	/* update write_off */
	pos->tnfw_w_write_off = offset + size;
	/*
	 * Unconditionally write a 0 into the last word allocated,
	 * in case we left an alignment gap. (Assume that doing an
	 * unconditional write is cheaper than testing and branching
	 * around the write half the time.)
	 */
	/* LINTED pointer cast may result in improper alignment */
	*((int *)((char *)destp + size - sizeof (int))) = 0;
	return (destp);
}

/*
 * Allocate a directory entry.
 */
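/*
 * Entries are tnf_ref32_t cells handed out sequentially from the
 * zone between TNF_DIRENT_LAST + 4 and TNFW_B_FW_ZONE (presumably
 * the forwarding-pointer zone, judging by the _fw_ naming), under
 * the file-header spinlock.  NULL is returned once the zone is
 * exhausted.
 */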

/*ARGSUSED0*/
void *
tnfw_b_fw_alloc(TNFW_B_WCB *wcb)
{
	tnf_buf_file_header_t *fh;
	lock_t *lp;
	ushort_t spl;
	caddr_t cell;
	ulong_t next;

	/* LINTED pointer cast may result in improper alignment */
	fh = TNF_FILE_HEADER();
	lp = &fh->lock;

	lock_set_spl(lp, spinlock_spl, &spl);
	next = fh->next_fw_alloc;
	if (next < TNFW_B_FW_ZONE) {
		cell = (caddr_t)fh + next;
		fh->next_fw_alloc = next + sizeof (tnf_ref32_t);
	} else
		cell = NULL;
	lock_clear_splx(lp, spl);

	return (cell);
}

/*
 * Initialize a buffer.
 */

void
tnfw_b_init_buffer(caddr_t buf, size_t size)
{
	int gen_shift;
	int i;
	ulong_t b;
	ulong_t blocks;
	tnf_block_header_t *block;
	tnf_buf_file_header_t *fh;

	/* Compute platform-specific spinlock_spl */
	spinlock_spl = __ipltospl(LOCK_LEVEL + 1);

	/* LINTED pointer cast may result in improper alignment */
	fh = (tnf_buf_file_header_t *)buf;

	/* LINTED logical expression always true: op "||" */
	ASSERT(TNF_DIRECTORY_SIZE > TNF_BLOCK_SIZE);

	/*
	 * This assertion is needed because we cannot change
	 * sys/tnf_com.h this late in the release cycle, but we need the
	 * interface in sys/machlock.h for locking operations.
	 */
	/* LINTED logical expression always true: op "||" */
	ASSERT(sizeof (tnf_byte_lock_t) == sizeof (lock_t));

	/* Calculate number of blocks */
	blocks = size >> TNF_BLOCK_SHIFT;

	/* Calculate generation shift */
	gen_shift = 0;
	b = 1;
	while (b < blocks) {
		b <<= 1;
		++gen_shift;
	}
	ASSERT(gen_shift < 32);

	/* fill in file header */
	/* magic number comes last */
	/* LINTED constant truncated by assignment */
	fh->com.tag = TNF_FILE_HEADER_TAG;
	fh->com.file_version = TNF_FILE_VERSION;
	fh->com.file_header_size = sizeof (tnf_file_header_t);
	fh->com.file_log_size = gen_shift + TNF_BLOCK_SHIFT;
	fh->com.block_header_size = sizeof (tnf_block_header_t);
	fh->com.block_size = TNF_BLOCK_SIZE;
	fh->com.directory_size = TNF_DIRECTORY_SIZE;
	/* LINTED assignment of 64-bit integer to 32-bit integer */
	fh->com.block_count = blocks;
	/* com.blocks_valid is unused */
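	/*
	 * Start at generation 1 with the hint pointing at the first
	 * data block: the block headers below are initialized with
	 * generation 0, so every data block is immediately
	 * reclaimable by the allocation loop.
	 */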
	fh->next_alloc.gen = 1;
	fh->next_alloc.block[0] = 0;
	fh->next_alloc.block[1] = TNFW_B_DATA_BLOCK_BEGIN >> TNF_BLOCK_SHIFT;
	fh->next_tag_alloc = TNF_DIRECTORY_SIZE >> TNF_BLOCK_SHIFT;
	fh->next_fw_alloc = TNF_DIRENT_LAST + 4;
	LOCK_INIT_CLEAR(&fh->lock);

	(void) bzero(buf + sizeof (*fh), TNF_DIRECTORY_SIZE - sizeof (*fh));
	i = TNF_DIRECTORY_SIZE >> TNF_BLOCK_SHIFT;
	for (; i < blocks; ++i) {
		/* LINTED pointer cast may result in improper alignment */
		block = (tnf_block_header_t *)(buf + (i << TNF_BLOCK_SHIFT));
		block->tag = (tnf_ref32_t)TNF_BLOCK_HEADER_TAG;
		block->generation = 0;
		block->bytes_valid = sizeof (tnf_block_header_t);
		LOCK_INIT_CLEAR(&block->A_lock);
		LOCK_INIT_CLEAR(&block->B_lock);
	}

	/* snap in magic number */
	fh->magic = TNF_MAGIC;
}