$modcal$

$linenum 7000$
$lines 54$

$partial_eval on$

$range off$
$ovflcheck off$
$debug off$

{
{ HFS CACHE
{ handles caching for
{       superblocks
{       cgroups
{       inode
{       data (indir blocks, dir contents)
}


module hfscache;

$search 'hfstuff', 'hfscalc', 'hfsupport'$

import
    sysdevs,
    hfstuff,
    asm,
    hfscalc,
    hfsupport,
    sysglobals,
    iocomasm;


{
{ Rules of usage:
{ SET-UP:
{  1.  call init_cache first
{  2.  call init_hfs_unit for every hfs unit
{  3.  call configure_cache once step 2 is finished
{ NORMAL USE:
{  1.  call get_superblock before doing anything else with a
{       given unit.
{  2.  only one unit is active at a time.  get_superblock makes it
{       active.  All other calls use the active unit.
{  3.  call sync to flush all buffers to disc.  This is NOT done
{       automatically by hfsalloc, so must be done in the DAM/TM.
{       Sync also sets all use counts to 0 (to recover from escapes),
{       so call it only when all put_* have been done.
{  4.  for debugging, call check_cache when a transaction is finished.
{       It complains if any of the cache is still in use.
{  5.  call invalidate_unit to invalidate any in-core buffers.  This
{       MUST happen when the user swaps floppies in the same drive,
{       for example.
{  6.  get/put work as follows.  Call get_* to get *.  When you're
{       through with it, call put_* with release.  If you ever
{       write to *, call put_* with dirty or immediate.
{       dirty/immediate/release can be combined as desired in
{       a single put_* call.  There is also "invalid", which means that
{       the buffer contents are invalid.
{ INTERRUPTS:
{      Because caching is done in static heap space, the hfs driver is NOT
{       reentrant with respect to interrupts.  ie, if you call the AM, DAM,
{       or the hfsTM from within an ISR, you risk messing up any lower
{       level transactions currently happening, even if you've got your
{       own FIB!
{ MEDIA CHANGE:
{      Exactly the same as "SETUP" above.
}

{
{ How the cache is set up:
{  1.  Fixed-size block allocated by init_cache.  "hfs_cache_bytes" is size
{       in bytes of cache.  This size is user-configurable,
{       in the module hfs_user.
{  2.  init_hfs_unit looks at the beginning of the superblock and
{       calculates how big each superblock and cgroup really are.
{       It remembers the biggest.  Under PAWS, it should be called
{       by TABLE.
{  3.  configure_cache takes info from step 2 and doles out the space
{       allocated in step 1.  It is called by TABLE
{       under PAWS.  Configuration consists of deciding how many
{       of each buffer we will have, and setting up the buffer headers
{       accordingly.
}


export

const
    { size of cache block in bytes.  Must be power of 2, >= 1K }
    cache_blk_size = 1024;

type
    cache_blk_type = packed array[0..cache_blk_size-1] of char;
    cache_blk_ptr_type = ^cache_blk_type;
    cache_action_type = (release, dirty, immediate, invalid, stamping);
    cache_action_set = set of cache_action_type;

    { inode a little bigger in core }
    binode_type = packed record
	inode: inode_type;
	inumber: integer;
    end;
    binode_ptr_type = ^binode_type;

var
    current_super: super_block_ptr_type;


{
{ cache control
}
procedure init_cache;
procedure init_hfs_unit(unum: integer; force: boolean);
procedure configure_cache;
procedure sync;
procedure invalidate_unit(badunit: integer);

{
{ disc access for each item
}
function  get_superblock(unum: shortint): super_block_ptr_type;
procedure put_superblock(fs: super_block_ptr_type;
			 how: cache_action_set);

function  get_cgroup(groupnum: integer): cgroup_ptr_type;
procedure put_cgroup(cgp: cgroup_ptr_type;
		     how: cache_action_set);

function  get_inode(nodenum: integer): inode_ptr_type;
procedure put_inode(ip: inode_ptr_type;
		    how: cache_action_set);

function  get_datablk(fragnum: frag_type;
		      offset: integer): cache_blk_ptr_type;
function  get_edatablk(fragnum: frag_type;
		       offset: integer): cache_blk_ptr_type;
procedure put_datablk(dp: cache_blk_ptr_type;
		      how: cache_action_set);

{
{ miscellaneous
}
function inumber(iptr: inode_ptr_type): integer;
function itype(ip: inode_ptr_type): integer;
procedure nuke_unit(unit : integer);

implement

const
    { for enumerating item types -- must begin with 1 }
    I_SB = 1;
    I_CG = 2;
    I_INODE = 3;
    I_DATABLK = 4;
    NUM_ITEMS = 4;

    { for "empty" (no disc read) data blks, <> any item type }
    I_EDATABLK = -1;

type
    { buffer control record }
    buf_hdr_ptr_type = ^buf_hdr_type;
    buf_hdr_type = packed record
	is_dirty: boolean;
	item: 1..NUM_ITEMS;
	use_count: 0..255;
	unit: shortint;
	disc_addr: integer;
	age: integer {was ushort. SFB};
	max_size: ushort;
	size1, size2: shortint;
	next: buf_hdr_ptr_type;
	case integer of
	    1: (iptr: inode_ptr_type);
	    2: (fsptr: super_block_ptr_type);
	    3: (cgptr: cgroup_ptr_type);
	    4: (dptr: cache_blk_ptr_type);
	    5: (wptr: windowp);
    end;
    buf_hdr_array_type = array[1..maxint] of buf_hdr_type;
    buf_hdr_array_ptr_type = ^buf_hdr_array_type;

type
    item_info_type = array[1..NUM_ITEMS] of integer;

const
    { max number of each item in cache }
    MAX_SBS = 2;
    MAX_CGS = 20;
    MAX_INODES = 100;
    MAX_DATABLKS = 50;

    { number of each item in cache "unit" }
    UNIT_SBS = 1;
    UNIT_CGS = 3;
    UNIT_INODES = 10;
    UNIT_DATABLKS = 3;

    { can't survive with fewer items than this }
    MIN_SBS = 1;
    MIN_CGS = 1;
    MIN_INODES = 3;
    MIN_DATABLKS = 2;

    { minimum cache size }
    MIN_CACHE_BYTES = 10*1024;

    { default cache size }
    DEF_CACHE_BYTES = 15*1024;

    { illegal unit number }
    NO_UNIT = -1;

    { and another, which can be the same }
    ALL_UNITS = -1;

    bytes_per_ptr = 4;

var
    { address of buf hdrs for each item }
    buf_hdrs_array: array[1..NUM_ITEMS] of buf_hdr_array_ptr_type;

    { linked list of buf hdrs for each item }
    buf_list_array: array[1..NUM_ITEMS] of buf_hdr_ptr_type;

    { how many of each item actually present }
    buf_count: item_info_type;

    { how many items in a tiled unit }
    unit_count: item_info_type;

    { maximum, minimum count for each item }
    max_count: item_info_type;
    min_count: item_info_type;

    { size of each item }
    item_size: item_info_type;

    { size of each buffer -- item size plus a backptr }
    buf_size: item_info_type;

    { array of buffers }
    cache_space: anyptr;
    { size of the array }
    cache_bytes: integer;

    { pseudo-clock for buffer aging }
    pclock: integer {was ushort. SFB} ;

    { where current_super lives }
    current_unit: shortint;

    { has cache been properly initialized? }
    initialized: boolean;

{-------------------------------------------------------------------------}
{
{ Miscellaneous utility routines
}

{
{ actual_sb_size
{ calculates size of sb, not including csumm info, that
{ we read and write.  this info is in superblock, but always
{ rounded up to fragment, too coarse for our purposes.
{ sb size is the fixed part, plus 1 byte for each big block
{ in a cylinder cycle.
}
function actual_sb_size(fs: super_block_ptr_type): integer;
var
    raw_size: integer;
begin
    { fixed superblock structure, plus one byte for each big block in a
      cylinder cycle (spc * cpc units, counted in big blocks) }
    raw_size := sizeof(fs^) + howmany(fs^.spc * fs^.cpc, nspb(fs));
    { pad out to a whole number of disc sectors }
    actual_sb_size := roundup(raw_size, DISK_SECTOR);
end;

{
{ actual_cs_size
{ size of csumm info
{ cs size is one csum structure per cylinder group
}
function actual_cs_size(fs: super_block_ptr_type): integer;
var
    raw_size: integer;
begin
    { one csum structure for every cylinder group on the disc }
    raw_size := fs^.ncg * sizeof(csumm_type);
    { pad out to a whole number of disc sectors }
    actual_cs_size := roundup(raw_size, DISK_SECTOR);
end;

{
{ actual_cg_size
{ size of cg info
{ cg size is fixed part, plus 1 bit for each fragment in the
{ cylinder group.
}
function actual_cg_size(fs: super_block_ptr_type): integer;
var
    raw_size: integer;
begin
    { fixed cgroup structure, plus the free map: one bit per fragment
      in the group (fpg fragments, NBBY bits per byte) }
    raw_size := sizeof(cylinder_group_block_type) + howmany(fs^.fpg, NBBY);
    { pad out to a whole number of disc sectors }
    actual_cg_size := roundup(raw_size, DISK_SECTOR);
end;


{
{ assign_space
{ We have decided how many headers and buffers for each item can fit into
{ the given cache space.  Now we assign the headers and buffers for a single
{ item.  count is how
{ many we can afford.  size is the size of each buffer plus header.
{ cache_address is the address of the space we are allotted,
{ which we keep up to date.
{ The beginning of the space is taken for buffer headers,
{ and the end for buffers.  The hdrs are initialized, and the
{ hdrs and buffers are pointed to each other.
}
procedure assign_space(var cache_address: ipointer;
		       count, size: integer;
		      this_item: integer);
var
    i: integer;
    buf_hdr: buf_hdr_ptr_type;
begin
    { first get the hdr space }
    { the hdr array view, the LRU list head, and the running hdr cursor
      all start at the front of this item's allotment }
    buf_hdrs_array[this_item] := buf_hdr_array_ptr_type(cache_address);
    buf_list_array[this_item] := buf_hdr_ptr_type(cache_address);
    buf_hdr := buf_hdr_ptr_type(cache_address);
    cache_address := addr(cache_address^, count*sizeof(buf_hdr_type));
    { from here on "size" is one buffer alone (back ptr + item bytes) }
    size := size - sizeof(buf_hdr_type);

    { then initialize each hdr-buffer pair }
    for i := 1 to count do begin
	with buf_hdr^ do begin
	    is_dirty := false;
	    item := this_item;
	    use_count := 0;
	    unit := NO_UNIT;
	    { usable item bytes: buffer minus the back pointer in front }
	    max_size := size - bytes_per_ptr;
	    { the item data starts just past the back pointer }
	    dptr := addr(cache_address^, bytes_per_ptr);
	    if i = count then
		next := nil
	    else
		next := addr(buf_hdr^, sizeof(buf_hdr_type));
	end;
	{ back ptr from buffer to buf hdr }
	cache_address^ := integer(buf_hdr);
	cache_address := addr(cache_address^, size);
	buf_hdr := addr(buf_hdr^, sizeof(buf_hdr_type));
    end;
end;


{
{ allocate_extra
{ takes some extra cache space and allocates as much of it
{ as possible to the given item.  There is extra cache space
{ because the unit of allocation might not fit into the cache
{ space an integral number of times, or because some buf hdr
{ arrays (like sb) are kept deliberately small.
{ updates extra_bytes to show how much taken.
}
procedure allocate_extra(var extra_bytes: integer; item: integer);
var
    bufs_that_fit, room_left: integer;
begin
    { buffers we can add is limited both by the space available and by
      the configured maximum for this item type }
    bufs_that_fit := extra_bytes div buf_size[item];
    room_left := max_count[item] - buf_count[item];
    if bufs_that_fit > room_left then
	bufs_that_fit := room_left;
    buf_count[item] := buf_count[item] + bufs_that_fit;
    { report back how many bytes are still unclaimed }
    extra_bytes := extra_bytes - (bufs_that_fit * buf_size[item]);
end;

{-------------------------------------------------------------------------}
{
{ buffer read and write routines
}

{
{ read_buf
{ reads a buffer from the disc
{ caller sets:
{       unit
{       disc_address
{       item
{       size1 (ignored for superblocks)
{ max_size shows the max buffer size available.
{ size1 shows how much we should read.
{ for superblocks, there is a small bootstrap problem,
{ because we don't know how much to read until we see it.
{ so we read the max size, then set size1 and size2 and
{ read the csumm info.
{ ioresult shows success or failure.
}
procedure read_buf(buf_hdr: buf_hdr_ptr_type);
label
    999;
var
    sector_buf: packed array[1..DISK_SECTOR] of char;
    sector_addr: integer;
    buf_iptr: inode_ptr_type;
    cs_size: integer;
    cs_mem_addr: csumm_array_ptr_type;
    i: integer;
begin
    with buf_hdr^ do
	case item of

	  I_INODE:
	    begin
		{ inodes are smaller than a sector: read the containing
		  sector, then copy out just the size1 inode bytes }
		{ get address of sector holding this inode }
		sector_addr := rounddownp2(disc_addr, DISK_SECTOR);
		{ read in the sector }
		get_bytes(unit, DISK_SECTOR, sector_addr, addr(sector_buf));
		{ and copy the inode }
		buf_iptr := addr(sector_buf, disc_addr - sector_addr);
		moveleft(buf_iptr^, iptr^, size1);
	    end;

	  I_DATABLK, I_CG:
	    begin
		get_bytes(unit, size1, disc_addr, wptr);
		{ a cgroup that read OK but has a bad magic number means
		  the filesystem structure itself is damaged }
		if (ioresult = ord(inoerror)) and (item = I_CG)
		and (cgptr^.magic <> CG_MAGIC) then begin
		    set_corrupt;
		    ioresult := ord(icorrupt);
		end;
	    end;

	  I_SB:
	    begin
		{ read the superblock }
		{ bootstrap: the real sb size is unknown until we see the
		  sb itself, so read up to max_size first }
		get_bytes(unit, min(SBSIZE, max_size), disc_addr, wptr);
		if fsptr^.magic <> FS_MAGIC then begin
		    ioresult := ord(inodirectory);
		    goto 999;
		end;
		{ disc wasn't unmounted cleanly -- remember that }
		if fsptr^.clean <> chr(FS_CLEAN) then
		    set_corrupt;
		size1 := actual_sb_size(fsptr);
		size2 := actual_cs_size(fsptr);
		{ read in the csumm info }
		{ csumm lives right behind the sb in this same buffer }
		get_bytes(unit, size2,
			  fragstobytes(fsptr, fsptr^.csaddr),
			  addr(wptr^, size1));
		{ install pointers to csumm info in superblock }
		{ one csp[] pointer per bsize-sized chunk of csumm info }
		cs_size := size2;
		cs_mem_addr := addr(wptr^, size1);
		i := 0;
		while cs_size > 0 do begin
		    fsptr^.csp[i] := cs_mem_addr;
		    cs_size := cs_size - fsptr^.bsize;
		    cs_mem_addr := addr(cs_mem_addr^, fsptr^.bsize);
		    i := i + 1;
		end;
	    end;

	 {unknown item in cache. this is DISASTER, and we must DIE!}
	  OTHERWISE begin ioresult:=ord(zdvrnoconfig); escape(-10); end;

	end;
999:
end;


{
{ write_buf
{ writes a buffer to disc
{ the size of the item is in the buffer header, as is
{ the disc address.  "size1" is the size of the item
{ currently there; if this is a superblock, then
{ "size2" gives the csumm info size.
}
procedure write_buf(buf_hdr: buf_hdr_ptr_type);
label
    999;
var
    sector_buf: packed array[1..DISK_SECTOR] of char;
    sector_addr: integer;
    buf_iptr: inode_ptr_type;
begin
    { invalid buffers (unit = NO_UNIT) are silently skipped }
    with buf_hdr^ do if unit <> NO_UNIT then begin
	case item of

	  I_INODE:
	    begin
		{ read-modify-write: inodes are smaller than a sector, so
		  fetch the sector, splice in the inode, write it back }
		{ get address of sector holding this inode }
		sector_addr := rounddownp2(disc_addr, DISK_SECTOR);
		{ read in the sector }
		get_bytes(unit, DISK_SECTOR, sector_addr, addr(sector_buf));
		if ioresult <> ord(inoerror) then
		    goto 999;
		{ copy the inode }
		buf_iptr := addr(sector_buf, disc_addr - sector_addr);
		moveleft(iptr^, buf_iptr^, size1);
		{ and put the sector back }
		put_bytes(unit, DISK_SECTOR, sector_addr, addr(sector_buf));
	    end;

	  I_DATABLK, I_CG:
	    begin
		{ stamp the cgroup with the time of this write }
		if item = I_CG then
		    cgptr^.time := sysgmttime;
		put_bytes(unit, size1, disc_addr, wptr);
	    end;

	  I_SB:
	    begin
		fsptr^.time := sysgmttime;
		put_bytes(unit, size1, disc_addr, wptr);
		if ioresult <> ord(inoerror) then
		    goto 999;
		{ csumm info goes out separately, to its own disc address,
		  from its spot just behind the in-core sb }
		put_bytes(unit, size2,
			  fragstobytes(fsptr, fsptr^.csaddr),
			  addr(wptr^, size1));
	    end;

	 {unknown item in cache. this is DISASTER, and we must DIE!}
	  OTHERWISE begin ioresult:=ord(zdvrnoconfig); escape(-10); end;

	end;
    end;
999:
end;



{---------------------------------------------------------------------------}
{
{ get and put buffers
}

{
{ block_in_cache
{ Returns given buffer if in cache.
{ If not in cache, returns
{       oldest unused buf hdr (if any)
{       an invalid buf hdr (if any)
{ buffers are linked in the next chain so that the most recently used
{ is first.
{ changed return type to buf_hdr_ptr_type from cache_blk_ptr_type, for
{ more generality and usefulness (see nuke_unit).
{ SFB
}
function block_in_cache(unit_wanted: integer;
			addr_wanted: integer;
			this_item: integer;
			var oldest_hdr: buf_hdr_ptr_type;
			var invalid_hdr: buf_hdr_ptr_type): buf_hdr_ptr_type;
label
    999;
var
    oldest_age: integer;
    buf_hdr, prev_buf_hdr: buf_hdr_ptr_type;
    i: integer;
begin

    { using the cache before configure_cache succeeded is fatal }
    if not initialized then begin
	ioresult := ord(zdvrnoconfig);
	escape(-10);
    end;

    block_in_cache := nil;

    oldest_hdr := nil;
    invalid_hdr := nil;

    buf_hdr := buf_list_array[this_item];
    prev_buf_hdr := nil;

    { one pass over the list: look for a hit, and along the way remember
      the fallbacks (an invalid buffer, or the oldest unused one) }
    repeat
	with buf_hdr^ do begin
	    { found it? }
	    if (unit = unit_wanted) and (disc_addr = addr_wanted) then begin
		use_count := use_count + 1;
	       {block_in_cache := dptr; {SFB}
		block_in_cache := buf_hdr;      {SFB}
		{ maintain list in order of use }
		{ unlink the hit and re-insert at the head (MRU first) }
		if prev_buf_hdr <> nil then begin
		    prev_buf_hdr^.next := next;
		    next := buf_list_array[this_item];
		    buf_list_array[this_item] := buf_hdr;
		end;
		goto 999;
	    end;
	    { once an invalid hdr is found, stop tracking the oldest:
	      the invalid one will be preferred by get_buf anyway }
	    if invalid_hdr = nil then
		{ invalid? }
		if unit = NO_UNIT then
		    invalid_hdr := buf_hdr
		else
		{ oldest so far? }
		if use_count = 0 then
		    if (oldest_hdr = nil) or (age < oldest_age) then begin
			oldest_hdr := buf_hdr;
			oldest_age := age;
		    end;
	    prev_buf_hdr := buf_hdr;
	    buf_hdr := next;
	end;
    until buf_hdr = nil;
999:
end;

{ Procedure to really "clean up" after unexpected escape.
{ It is used whenever hfsdam gets to its main try-recover with
{ escapecode <> 0, or in get_buf, if a "foreign" (non dam target
{ unit) cache record causes escape while flushing. It tries to mark
{ the superblock dirty and flush, iff it is currently in cache, and
{ there is no error during flush. It will not try to load the
{ superblock if it's currently not there, as this is more complexity
{ and IO than we care for. It also removes cache records for that
{ unit from the cache, marks it fs_corrupt in h_unitable, and
{ returns ioresult=ord(zcatchall), iff ioresult was 0 at entry.

{ Call it with the desired unum; it will figure out base_unum.
{
{ nuke_unit preserves escapecode, but always sets ioresult to something
{ non-zero.
{
{ SFB
}
procedure nuke_unit(unit : integer);
var
  in_cache_buf, oldest, invalid : buf_hdr_ptr_type;
  old_esccode, old_ior : integer;
begin
 { cache records are keyed by base unit number }
 unit := h_unitable^.tbl[unit].base_unum;
 old_ior := ioresult;
 old_esccode := escapecode;  {save so caller can use nuke_unit in
			      recover block}
 { only flush the superblock if it is already in the cache; oldest and
   invalid are ignored -- we never read it in just to mark it.
   NOTE(review): a hit bumps use_count and it is not released here;
   the following invalidate_unit zeroes it again }
 in_cache_buf := block_in_cache(unit, SBLOCK*DEV_BSIZE, I_SB, oldest,
				invalid);
 if in_cache_buf <> nil then
   try
    {mark superblock not_ok, and flush}
    in_cache_buf^.fsptr^.clean := chr(FS_NOTOK);
    write_buf(in_cache_buf);
   recover ; {ignore escapes, as we don't care if it fails}
 invalidate_unit(unit);
 h_unitable^.tbl[unit].fs_corrupt := true;
 sysescapecode := old_esccode;
 { ioresult always comes back non-zero, so callers see the failure }
 if old_ior = ord(inoerror) then
  ioresult:=ord(zcatchall)
 else
  ioresult:=old_ior;
end;



{
{ generic get routine
{ find buffer with given item and address
{ failing that, find invalid buffer (unit = NO_UNIT)
{ failing that, find oldest valid unused buffer
{ failing that, panic
{ read the disc if info not already in the cache.
{ exception: I_EDATABLK is for datablks that we shouldn't read.
}
function get_buf(unit_wanted: integer; addr_wanted: integer;
		 size, this_item: integer): cache_blk_ptr_type;
label
    999;
var
    do_read: boolean;
    buf_hdr: buf_hdr_ptr_type;
    oldest_hdr: buf_hdr_ptr_type;
    invalid_hdr: buf_hdr_ptr_type;
    in_cache_buf: buf_hdr_ptr_type;
    tmpioresult: integer;
begin
    get_buf := nil;
    { preserve the caller's ioresult across our own disc traffic }
    tmpioresult := ioresult;
    ioresult := ord(inoerror);

    { I_EDATABLK is a datablk the caller will overwrite: treat it as a
      normal datablk but skip the disc read }
    if this_item = I_EDATABLK then begin
	this_item := I_DATABLK;
	do_read := false;
    end
    else
	do_read := true;

    in_cache_buf := block_in_cache(unit_wanted, addr_wanted, this_item,
				   oldest_hdr, invalid_hdr);

    if in_cache_buf <> nil then begin
	{ block found in cache }
       {get_buf := in_cache_buf;        {SFB}
	get_buf := in_cache_buf^.dptr;  {SFB}
	goto 999;
    end;

    { not in cache.  can we use an available buffer? }
    { prefer an invalid buffer; else evict the least recently used }
    if invalid_hdr <> nil then
	buf_hdr := invalid_hdr
    else
    if oldest_hdr <> nil then
	buf_hdr := oldest_hdr
    else begin
	{ every buffer of this item type is still in use }
	ioresult := ord(zdvrcachefull);
	goto 999;
    end;

    { now we have a buffer }
    with buf_hdr^ do begin
	{ evicting a dirty buffer means flushing it out first }
	if is_dirty and (unit <> NO_UNIT) then
	  try   {make sure that any escape here: 1) tries to mark superblock
		 fs_notok on disc, 2) invalidates the cache for that unit,
		 3) marks corrupt in h_unitable, and 4) returns icorrupt in
		 ioresult iff the cache record was on same disc as dam
		 target disc. SFB}
	    write_buf(buf_hdr);
	  recover       {handle other media errors in-line here, and handle
			 errors on dam target unit in hfsdam cleanup code. SFB}
	   if unit = h_unitable^.tbl[unit_wanted].base_unum then
	     escape(escapecode) {let dam cleanup handle it. SFB}
	   else
	     begin
	       nuke_unit(unit);   {if not dam target unit that failed, then
				   continue with dam call. SFB}
	       ioresult := ord(inoerror);
	     end;
	{ claim the buffer for the new unit/address, use count 1 }
	is_dirty := false;
	use_count := 1;
	unit := unit_wanted;
	disc_addr := addr_wanted;
	size1 := size;
    end;

    if do_read then begin
	try
	    read_buf(buf_hdr);
	recover
	    if escapecode <> -10 then
	     ioresult := ord(zcatchall); {Exercise cleanup code. SFB};
	if ioresult <> ord(inoerror) then begin
	    { read failed: hand the buffer back as invalid }
	    buf_hdr^.use_count := 0;
	    buf_hdr^.unit := NO_UNIT;
	    goto 999;
	end;
    end;

    get_buf := buf_hdr^.dptr;
999:
    {
    { ioresult during get_buf -> escape
    { ioresult when get_buf called -> just pass it on
    }
    if ioresult <> ord(inoerror) then
	escape(-10);
    ioresult := tmpioresult;
end;

{
{ generic put routine
{ buf_addr is the buffer
{ in front of every buffer is a pointer to its hdr
{ "how" tells what to do with it:
{       release -- finished with it
{       dirty -- will need writing
{       immediate -- immediate write_through
{       invalid -- buffer contents worthless, reuse buffer whenever needed
}
procedure put_buf(anyvar buf_addr: ipointer;
		  how: cache_action_set);
label
    999;
var
    buf_hdr: buf_hdr_ptr_type;
begin
    { tolerate nil so cleanup paths can release unconditionally }
    if buf_addr = nil then
	goto 999;

    { the hdr pointer lives immediately in front of every buffer
      (planted there by assign_space) }
    buf_addr := addr(buf_addr^, -bytes_per_ptr);
    buf_hdr := buf_hdr_ptr_type(buf_addr^);

    with buf_hdr^ do begin

	{ set the dirty bit? }
	if dirty in how then begin
	    is_dirty := true;
	end;

	{ flush the buffer? }
	if immediate in how then begin
	    try {mark corrupt for ALL escapes. SFB}
	      write_buf(buf_hdr);
	    recover
	      ioresult:=ord(icorrupt);
	    { dirty bit is cleared whether or not the write succeeded }
	    is_dirty := false;
	end;

	{ time stamping?  write only if dirty, and ignore errors }
	if stamping in how then
	    if is_dirty then begin
		try
		    write_buf(buf_hdr);
		recover
		    if escapecode<>-10 then     {SFB}
		     ioresult:=ord(icorrupt)
		    else
		     if ioresult = ord(zprotected) then
			ioresult := ord(inoerror);
		is_dirty := false;
	    end;

	if release in how then begin
	    if use_count = 1 then begin {tick pclock only when use_count going
					 to 0. SFB}
		pclock := pclock + 1;
		age := pclock;
	    end;

	    if use_count > 0 then       {added to fix 9895A bug. Note that
	       if we have range check off, and let use_count (0..255) go
	       negative, the COMPILER has generated code that doesn't
	       strip off sign extend bits before putting field back in
	       record.  Item was getting set to 7, and is_dirty to True.
	       It's ok to release more than it gets; error handling strategy
	       at higher levels of code releases all cache records, even if
	       they haven't been got yet, or they're already released.
	       It's easier that way.
	       SFB}
	     use_count := use_count - 1;
	end;

	if invalid in how then
	    { don't save buffer contents }
	    unit := NO_UNIT;
    end;
999:
end;





{--------------------------------------------------------------------------}
{
{ cache setup and configuration
}

{
{ init_cache
{ determine the size of the cache, and allocate the space.
{ buffer headers are left alone until we see how big sbs and cgs are.
}
procedure init_cache;
type
    { layout of the user-supplied HFS_USER_CACHE_INFO record }
    urec = record
	user_cache_bytes: integer;
	simultaneous_hfs_discs: integer;
    end;
    urec_ptr = ^urec;
var
    i: integer;
    user_ip: urec_ptr;
    max_supers: integer;
begin
    init_support;

    user_ip := urec_ptr(value('HFS_USER_CACHE_INFO'));

    { allocate cache buffer space if not done already }
    if cache_space = nil then begin
	if user_ip = nil then
	    cache_bytes := DEF_CACHE_BYTES
	else
	    cache_bytes := max(MIN_CACHE_BYTES, user_ip^.user_cache_bytes);

	{ newbytes escapes if no memory }
	{ on failure "initialized" simply stays false and every later
	  cache entry point refuses to run }
	try
	    newbytes(cache_space, cache_bytes);
	    initialized := true;
	recover
	    ;
    end;

    if user_ip = nil then
	max_supers := MAX_SBS
    else
	max_supers := max(MIN_SBS, user_ip^.simultaneous_hfs_discs);

    { and init some variables }
    max_count[I_INODE]   := MAX_INODES;
    max_count[I_SB]      := max_supers;
    max_count[I_CG]      := MAX_CGS;
    max_count[I_DATABLK] := MAX_DATABLKS;

    min_count[I_INODE]   := MIN_INODES;
    min_count[I_SB]      := MIN_SBS;
    min_count[I_CG]      := MIN_CGS;
    min_count[I_DATABLK] := MIN_DATABLKS;

    unit_count[I_INODE]   := UNIT_INODES;
    unit_count[I_SB]      := UNIT_SBS;
    unit_count[I_CG]      := UNIT_CGS;
    unit_count[I_DATABLK] := UNIT_DATABLKS;

    { sb and cg sizes stay 0 until init_hfs_unit sees a real disc }
    item_size[I_INODE]   := sizeof(binode_type);
    item_size[I_DATABLK] := cache_blk_size;
    item_size[I_SB]      := 0;
    item_size[I_CG]      := 0;


    {
    { Configure the cache, in case TABLE never tries.
    }
    configure_cache;

end;


{
{ init_hfs_unit
{ look at the superblock and calculate the size of
{ the superblock, with cylinder summary info, and the
{ size of the cgroup, with freemap.  item_size[I_SB and I_CG]
{ has the maximum of these two sizes.
{ The cache isn't set up yet, but the buffer space is allocated,
{ so we just use the beginning of that space.
{ If it looks like an HFS unit, we call init_support_unit,
{ which will set "is_hfs_unit" in the h_unitable.
}
procedure init_hfs_unit(unum: integer; force: boolean);
label
    999;
var
    fs: super_block_ptr_type;
    base_unum: integer;
    is_hfs: boolean;
begin
    is_hfs := false;

    if not initialized then begin
	ioresult := ord(zdvrnoconfig);
	goto 999;
    end;

    { read in fixed part of superblock }
    { the cache isn't configured yet, so borrow the start of the raw
      cache space as a scratch buffer }
    fs := super_block_ptr_type(cache_space);
    fs^.magic := 0;
    unum := set_unit(unum);
    try
	get_bytes(unum, sizeof(fs^), SBLOCK*DEV_BSIZE, windowp(fs));
    recover
	{ a "medium changed" escape on a not-yet-validated medium just
	  means nobody has looked yet -- retry the read once }
	if (ioresult = ord(zmediumchanged))
	and not unitable^[unum].umediavalid then begin
	    try
		ioresult := ord(inoerror);
		get_bytes(unum, sizeof(fs^), SBLOCK*DEV_BSIZE, windowp(fs));
	    recover
		goto 999;
	end
	else    goto 999;

    { is it HFS? }
    { sanity checks on the superblock's internal consistency }
    with fs^ do
    if (magic <> FS_MAGIC)
	or (fsize * frag <> bsize)
	or (minfree < 0)
	or (minfree > 100)
	or (sbsize < sizeof(fs^))
	or (cpg * ncg < ncyl) then
	    goto 999;

    with fs^ do
	{ size * fsize must fit in 31 bits (disk byte addressable) }
	if (size < 0) or (maxint div size < fsize) then
	    goto 999;

    is_hfs := true;

    { sb = sb_bytes + cs_bytes }
    { remember the biggest sizes seen; configure_cache uses them }
    item_size[I_SB] := max(item_size[I_SB],
			   actual_sb_size(fs) + actual_cs_size(fs));
    item_size[I_CG] := max(item_size[I_CG],
			   actual_cg_size(fs));

999:
    { register the unit (flagging it dirty if it wasn't unmounted clean) }
    if is_hfs or force then
	init_support_unit(unum, is_hfs and (fs^.clean <> chr(FS_CLEAN)));
end;


{
{ configure_cache
{ Here we allocate the cache memory to the buffer headers.
{ The algorithm is as follows:
{ We have a replicable unit consisting of unit_count[i] copies
{ of item i.
{ We allocate this unit as many times as fits.
{ We may have to adjust by maximum counts on some items.
{ If this unit doesn't fit at all, then we take min_count[i] copies.
{ If even that won't fit, we panic.
{ Each buffer item is preceded by a pointer back to its buffer header.
{ This is not included in the various types, so we have
{ to add room for it here.
}
procedure configure_cache;
label
    999;
var
    unit_bytes: integer;
    factor: integer;
    half_extra, extra_bytes: integer;
    i: integer;
    cache_address: ipointer;
begin
    if not initialized then
	goto 999;

    {
    { supply defaults if never could look at the disc.
    { The defaults are small, because this only happens in the
    { case of floppies, where TABLE can force an assignment of
    { HFSDAM even when the disc isn't in the drive yet.
    }
    if item_size[I_SB] = 0 then begin
	item_size[I_SB] := 3*1024;
	item_size[I_CG] := 2*1024;
    end;

    { account for back pointer for each type, plus header }
    for i := 1 to NUM_ITEMS do
	buf_size[i] := item_size[i] + bytes_per_ptr + sizeof(buf_hdr_type);

    { how many bytes in the unit? }
    unit_bytes := 0;
    for i := 1 to NUM_ITEMS do
	unit_bytes := unit_bytes + (unit_count[i] * buf_size[i]);

    { how many times does it fit in the cache? }
    factor := cache_bytes div unit_bytes;
    if factor > 0 then
	{ unit can be replicated }
	for i := 1 to NUM_ITEMS do
	    buf_count[i] := min(factor * unit_count[i], max_count[i])
    else
	{ Not enough space to replicate unit }
	for i := 1 to NUM_ITEMS do
	    buf_count[i] := min_count[i];

    { calculate extra bytes }
    extra_bytes := cache_bytes;
    for i := 1 to NUM_ITEMS do
	extra_bytes := extra_bytes - (buf_count[i] * buf_size[i]);

    { if even the minimum configuration overshoots the space, panic }
    if extra_bytes < 0 then begin
	initialized := false;
	goto 999;
    end;

    { give half of the extra space to the datablks }
    { allocate_extra updates its var argument to the bytes left over }
    half_extra := extra_bytes div 2;
    extra_bytes := half_extra;
    allocate_extra(half_extra, I_DATABLK);

    { the other half goes to the inodes }
    { plus whatever the datablks could not use }
    extra_bytes := extra_bytes + half_extra;
    allocate_extra(extra_bytes, I_INODE);

    { now allocate the cache space }
    cache_address := ipointer(cache_space);
    for i := 1 to NUM_ITEMS do begin
	assign_space(cache_address, buf_count[i], buf_size[i], i);
    end;

999:
    {
    { reinit enough vars so can run TABLE again (or for the first time)
    { (TABLE calls init_hfs_unit + configure_cache)
    }
    item_size[I_SB]      := 0;
    item_size[I_CG]      := 0;
end;


{
{ sync the cache by flushing all dirty buffers
}
procedure sync;
label
    999;
var
    i, j: integer;
    buf_hdr: buf_hdr_ptr_type;
    tmpioresult: integer;
begin
    { Save the caller's ioresult FIRST.  The old code saved it after the
      "initialized" test, so the early goto 999 reached the final
      "ioresult := tmpioresult" with tmpioresult never assigned, and
      stored garbage into ioresult. }
    tmpioresult := ioresult;
    if not initialized then
	goto 999;
    for i := 1 to NUM_ITEMS do
	for j := 1 to buf_count[i] do begin
	    buf_hdr := addr(buf_hdrs_array[i]^[j]);
	    with buf_hdr^ do begin
		{ sync also zeroes all use counts (recover from escapes) }
		use_count := 0;
		{fix for cacher bug with 2 HFS units and LIBRARIAN. SFB 6-16-87}
		if is_dirty and (unit=current_unit) {SFB} then begin
		    try
			write_buf(buf_hdr);
		    recover begin
			{ remember the first error, then drop this unit's
			  buffers so stale data can't be written later }
			if tmpioresult = ord(inoerror) then
			    tmpioresult := ioresult;
			invalidate_unit(unit);
		    end;
		    is_dirty := false;
		end;
	    end;
	end;
999:
    { restore the caller's ioresult, or report the first flush error }
    ioresult := tmpioresult;
end;

$if FALSE$
{
{ check_cache
{ debug routine
{ caller asserts that all buffers have been released.
}
{ Don't enable except for test. SFB
procedure check_cache;
label
    999;
var
    i, j: integer;
begin
    if not initialized then
	goto 999;
    for i := 1 to NUM_ITEMS do
	for j := 1 to buf_count[i] do
	    if buf_hdrs_array[i]^[j].use_count <> 0 then
		writeln('BUFFER STILL IN USE, type ', i:1, ' # ', j:1);
999:
end;
}

{
{ print the cache statistics
}
procedure cache_stats;
label
    999;
var
    i: integer;
begin
    { NOTE(review): this routine sits inside $if FALSE$ and is never
      compiled.  The cache_hit/cache_miss/cache_lazy/cache_flush
      counters it reads are not declared anywhere in this module --
      confirm where they live before ever enabling this block }
    if not initialized then
	goto 999;
    { one line per item type: lazy/flush write counts and hit rate }
    for i := 1 to NUM_ITEMS do begin
	if i = I_INODE then
	    write('inodes  ')
	else
	if i = I_SB then
	    write('supers  ')
	else
	if i = I_CG then
	    write('cgroups ')
	else
	if i = I_DATABLK then
	    write('data    ');
	write('(', buf_count[i]:2, ' bufs) ');
	write(cache_lazy[i]:4, ' L, ');
	write(cache_flush[i]:4, ' F, ');
	if cache_lazy[i] + cache_flush[i] = 0 then
	    write('L/F+L  0%, ')
	else
	    write('L/F+L ', ((cache_lazy[i]*100)
		div (cache_lazy[i] + cache_flush[i])):2, '%, ');
	write(cache_hit[i]:4, ' H, ');
	write(cache_miss[i]:4, ' M, ');
	if cache_hit[i] + cache_miss[i] = 0 then
	    writeln('hit rate  0%')
	else
	writeln('hit rate ', ((cache_hit[i]*100)
		div (cache_hit[i] + cache_miss[i])):2, '%');
    end;
    { reset the counters after printing }
    for i := 1 to NUM_ITEMS do begin
	cache_hit[i] := 0;
	cache_miss[i] := 0;
	cache_lazy[i] := 0;
	cache_flush[i] := 0;
    end;

999:
end;
$end$

{
{ invalidate_unit
{ invalidate (no flush) all buffers on this unit.
{ if this unit is ALL_UNITS, then we invalidate on every unit.
{ otherwise the given unit is mapped to its base unit via h_unitable.
}
procedure invalidate_unit(badunit: integer);
label
    999;
var
    item, slot: integer;
begin
    if not initialized then
	goto 999;
    { map to the base unit, unless we're invalidating everything }
    if badunit <> ALL_UNITS then
	badunit := h_unitable^.tbl[badunit].base_unum;
    { walk every buffer of every item type and drop matching ones
      on the floor -- no flushing, contents are simply abandoned }
    for item := 1 to NUM_ITEMS do
	for slot := 1 to buf_count[item] do
	    with buf_hdrs_array[item]^[slot] do
		if (badunit = ALL_UNITS) or (badunit = unit) then begin
		    unit := NO_UNIT;
		    is_dirty := false;
		    use_count := 0;
		end;
999:
end;


{----------------------------------------------------------------}
{
{ EXPORTED DATA ACCESS ROUTINES
}

{
{ get a superblock
{ We check the disc with every get_superblock to see if it's
{ still OK.  If not, we invalidate the unit, and return nil.
}
function  get_superblock(unum: shortint): super_block_ptr_type;
var
    fs: super_block_ptr_type;
begin
    unum := set_unit(unum);
    { size argument 0 is ignored for superblocks: read_buf works out
      the real size from the superblock contents }
    try
      fs := super_block_ptr_type(get_buf(unum,
				       SBLOCK*DEV_BSIZE,
				       0,
				       I_SB));
    recover
	fs := nil;

    if (fs = nil) then begin
	{ disc no longer readable: drop its buffers and report it gone }
	invalidate_unit(unum);
	medium_gone;
    end;
    { make this the active unit; all other get_* calls operate on it }
    current_unit := unum;
    current_super := fs;
    get_superblock := fs;
end;


{
{ put superblock back
}
procedure put_superblock(fs: super_block_ptr_type;
			 how: cache_action_set);
begin
    { if the filesystem code flagged the sb modified, force the dirty
      bit and clear the flag so the change is written out }
    if fs <> nil then
	if fs^.fmod = FS_MODIFIED then begin
	    fs^.fmod := FS_NOT_MODIFIED;
	    how := how + [dirty];
	end;
    put_buf(fs, how);
end;


{
{ get a cgroup
}
function  get_cgroup(groupnum: integer): cgroup_ptr_type;
var
    cg_addr, cg_bytes: integer;
begin
    { locate and size the cgroup on the active unit, then fetch it
      through the generic cache path }
    cg_addr := cgroup_start(current_super, groupnum);
    cg_bytes := actual_cg_size(current_super);
    get_cgroup := cgroup_ptr_type(get_buf(current_unit, cg_addr,
					  cg_bytes, I_CG));
end;


{
{ put cgroup back
}
procedure put_cgroup(cgp: cgroup_ptr_type;
		     how: cache_action_set);
begin
    { thin wrapper: cgroups need no special handling on the way back }
    put_buf(cgp, how);
end;


{
{ get an inode
}
function  get_inode(nodenum: integer): inode_ptr_type;
var
    ip: inode_ptr_type;
begin
    get_inode := nil;   {ensure no random garbage. iptrs are usually
			 instantiated from the stack. SFB}
    { valid inumbers run from 1 to groups * inodes-per-group }
    with current_super^ do
	if (nodenum < 1) or (nodenum > ncg*ipg) then begin
	    ioresult := ord(znosuchblk);
	    escape(-10);
	end;
    ip := inode_ptr_type(get_buf(current_unit,
				 inode_start(current_super, nodenum),
				 sizeof(inode_type),
				 I_INODE));
    if ip <> nil then
	{ stash the inumber in the in-core part of the binode }
	binode_ptr_type(ip)^.inumber := nodenum;
    get_inode := ip;
end;


{
{ put inode back
}
procedure put_inode(ip: inode_ptr_type;
		    how: cache_action_set);
begin
    { thin wrapper: inodes need no special handling on the way back }
    put_buf(ip, how);
end;


{
{ get_data1
{ subroutine of get_datablk and get_edatablk
{ we want the cache block that contains the byte addressed
{ by offset, where this is the offset within the given frag.
}
function  get_data1(fragnum: frag_type;
		    offset: integer;
		    item: integer): cache_blk_ptr_type;
var
    byte_addr: integer;
begin
    { absolute byte address of the wanted byte, truncated down to a
      cache block boundary (cache_blk_size is a power of 2) }
    byte_addr := fragstobytes(current_super, fragnum) + offset;
    byte_addr := rounddownp2(byte_addr, cache_blk_size);
    get_data1 := cache_blk_ptr_type(get_buf(current_unit, byte_addr,
					    cache_blk_size, item));
end;


{
{ get a datablk
}
function  get_datablk(fragnum: frag_type;
		      offset: integer): cache_blk_ptr_type;
begin
    { normal case: read the block from disc if it isn't cached }
    get_datablk := get_data1(fragnum, offset, I_DATABLK);
end;


{
{ get a datablk, but don't read the disc if it isn't in
{ the cache, because we're about to overwrite it anyway.
}
function  get_edatablk(fragnum: frag_type;
		       offset: integer): cache_blk_ptr_type;
begin
    { "empty" variant: skip the disc read, the caller will overwrite }
    get_edatablk := get_data1(fragnum, offset, I_EDATABLK);
end;


{
{ put datablk back
}
procedure put_datablk(dp: cache_blk_ptr_type;
		      how: cache_action_set);
begin
    { thin wrapper: datablks need no special handling on the way back }
    put_buf(dp, how);
end;


{
{ Miscellany
}

{
{ Given an inode ptr, return the inode number.
}
function inumber(iptr: inode_ptr_type): integer;
begin
    { the inumber is cached in core just after the inode (binode_type);
      get_inode filled it in }
    inumber := binode_ptr_type(iptr)^.inumber;
end;

{
{ return the inode type (IFREG, IFDIR, etc).
}
function itype(ip: inode_ptr_type): integer;
begin
    { mask the mode down to the file-type bits (IFREG, IFDIR, ...) }
    itype := binand(ip^.mode, IFMT);
end;

end.


