package Tie::File;
require 5.005;
use Carp ':DEFAULT', 'confess';
use POSIX 'SEEK_SET';
use Fcntl 'O_CREAT', 'O_RDWR', 'LOCK_EX', 'LOCK_SH', 'O_WRONLY', 'O_RDONLY';
sub O_ACCMODE () { O_RDONLY | O_RDWR | O_WRONLY }
$VERSION = "0.97_02";
my $DEFAULT_MEMORY_SIZE = 1<<21; # 2 megabytes
my $DEFAULT_AUTODEFER_THRESHHOLD = 3; # 3 records
my $DEFAULT_AUTODEFER_FILELEN_THRESHHOLD = 65536; # 64 KiB: 16 disk blocks of 4 KiB
my %good_opt = map {$_ => 1, "-$_" => 1}
qw(memory dw_size mode recsep discipline
autodefer autochomp autodefer_threshhold concurrent);
sub TIEARRAY {
if (@_ % 2 != 0) {
croak "usage: tie \@array, $_[0], filename, [option => value]...";
}
my ($pack, $file, %opts) = @_;
# transform '-foo' keys into 'foo' keys
for my $key (keys %opts) {
unless ($good_opt{$key}) {
croak("$pack: Unrecognized option '$key'\n");
}
my $okey = $key;
if ($key =~ s/^-+//) {
$opts{$key} = delete $opts{$okey};
}
}
if ($opts{concurrent}) {
croak("$pack: concurrent access not supported yet\n");
}
unless (defined $opts{memory}) {
# default is the larger of the default cache size and the
# deferred-write buffer size (if specified)
$opts{memory} = $DEFAULT_MEMORY_SIZE;
$opts{memory} = $opts{dw_size}
if defined $opts{dw_size} && $opts{dw_size} > $DEFAULT_MEMORY_SIZE;
# Dora Winifred Read
}
$opts{dw_size} = $opts{memory} unless defined $opts{dw_size};
if ($opts{dw_size} > $opts{memory}) {
croak("$pack: dw_size may not be larger than total memory allocation\n");
}
# are we in deferred-write mode?
$opts{defer} = 0 unless defined $opts{defer};
$opts{deferred} = {}; # no records are presently deferred
$opts{deferred_s} = 0; # count of total bytes in ->{deferred}
$opts{deferred_max} = -1; # empty
# What's a good way to arrange that this class can be overridden?
$opts{cache} = Tie::File::Cache->new($opts{memory});
# autodeferment is enabled by default
$opts{autodefer} = 1 unless defined $opts{autodefer};
$opts{autodeferring} = 0; # but is not initially active
$opts{ad_history} = [];
$opts{autodefer_threshhold} = $DEFAULT_AUTODEFER_THRESHHOLD
unless defined $opts{autodefer_threshhold};
$opts{autodefer_filelen_threshhold} = $DEFAULT_AUTODEFER_FILELEN_THRESHHOLD
unless defined $opts{autodefer_filelen_threshhold};
$opts{offsets} = [0];
$opts{filename} = $file;
unless (defined $opts{recsep}) {
$opts{recsep} = _default_recsep();
}
$opts{recseplen} = length($opts{recsep});
if ($opts{recseplen} == 0) {
croak "Empty record separator not supported by $pack";
}
$opts{autochomp} = 1 unless defined $opts{autochomp};
$opts{mode} = O_CREAT|O_RDWR unless defined $opts{mode};
$opts{rdonly} = (($opts{mode} & O_ACCMODE) == O_RDONLY);
$opts{sawlastrec} = undef;
my $fh;
if (UNIVERSAL::isa($file, 'GLOB')) {
# We use 1 here on the theory that some systems
# may not indicate failure if we use 0.
# MSWin32 does not indicate failure with 0, but I don't know if
# it will indicate failure with 1 or not.
unless (seek $file, 1, SEEK_SET) {
croak "$pack: your filehandle does not appear to be seekable";
}
seek $file, 0, SEEK_SET; # put it back
$fh = $file; # setting binmode is the user's problem
} elsif (ref $file) {
croak "usage: tie \@array, $pack, filename, [option => value]...";
} else {
# $fh = \do { local *FH }; # XXX this is buggy
if ($] < 5.006) {
# perl 5.005 and earlier don't autovivify filehandles
require Symbol;
$fh = Symbol::gensym();
}
sysopen $fh, $file, $opts{mode}, 0666 or return;
binmode $fh;
++$opts{ourfh};
}
{ my $ofh = select $fh; $| = 1; select $ofh } # autoflush on write
if (defined $opts{discipline} && $] >= 5.006) {
# This avoids a compile-time warning under 5.005
eval 'binmode($fh, $opts{discipline})';
croak $@ if $@ =~ /unknown discipline/i;
die if $@;
}
$opts{fh} = $fh;
bless \%opts => $pack;
}
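# Illustration of a typical constructor call; the filename and option
# values below are hypothetical, chosen only to show the calling
# convention:
#
#   use Tie::File;
#   tie my @lines, 'Tie::File', '/tmp/data.txt',
#       memory    => 20_000_000,   # 20 MB read cache
#       autochomp => 1             # the default; shown for clarity
#     or die "Couldn't tie file: $!";
#   $lines[3] = "new record";      # rewrites record 3 in place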
sub FETCH {
my ($self, $n) = @_;
my $rec;
# check the defer buffer
$rec = $self->{deferred}{$n} if exists $self->{deferred}{$n};
$rec = $self->_fetch($n) unless defined $rec;
# inlined _chomp1
substr($rec, - $self->{recseplen}) = ""
if defined $rec && $self->{autochomp};
$rec;
}
# Chomp many records in-place; return nothing useful
sub _chomp {
  my $self = shift;
  return unless $self->{autochomp};
  for (@_) {
    next unless defined;
    substr($_, - $self->{recseplen}) = "";
  }
}
# Chomp one record in-place; return modified record
sub _chomp1 {
my ($self, $rec) = @_;
return $rec unless $self->{autochomp};
return unless defined $rec;
substr($rec, - $self->{recseplen}) = "";
$rec;
}
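# Illustration: with the file's first record stored as "hello\n",
# $lines[0] yields "hello" while autochomp is on (the default), and
# "hello\n" after (tied @lines)->autochomp(0). (@lines is a tied array
# as in the constructor sketch above.)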
sub _fetch {
my ($self, $n) = @_;
# check the record cache
{ my $cached = $self->{cache}->lookup($n);
return $cached if defined $cached;
}
if ($#{$self->{offsets}} < $n) {
return if $self->{eof}; # request for record beyond end of file
my $o = $self->_fill_offsets_to($n);
# If it's still undefined, there is no such record, so return 'undef'
return unless defined $o;
}
$self->_seek($n); # we can do this now that offsets is populated
my $rec = $self->_read_record;
# If we happen to have just read the first record, check to see if
# the length of the record matches what 'tell' says. If not, Tie::File
# won't work, and should drop dead.
#
# if ($n == 0 && defined($rec) && tell($self->{fh}) != length($rec)) {
# if (defined $self->{discipline}) {
# croak "I/O discipline $self->{discipline} not supported";
# } else {
# croak "File encoding not supported";
# }
# }
$self->{cache}->insert($n, $rec) if defined $rec && not $self->{flushing};
$rec;
}
sub STORE {
my ($self, $n, $rec) = @_;
die "STORE called from _check_integrity!" if $DIAGNOSTIC;
$self->_fixrecs($rec);
if ($self->{autodefer}) {
$self->_annotate_ad_history($n);
}
return $self->_store_deferred($n, $rec) if $self->_is_deferring;
# We need this to decide whether the new record will fit
# It incidentally populates the offsets table
# Note we have to do this before we alter the cache
# 20020324 Wait, but this DOES alter the cache. TODO BUG?
my $oldrec = $self->_fetch($n);
if (not defined $oldrec) {
# We're storing a record beyond the end of the file
$self->_extend_file_to($n+1);
$oldrec = $self->{recsep};
}
# return if $oldrec eq $rec; # don't bother
my $len_diff = length($rec) - length($oldrec);
# length($oldrec) here is not consistent with text mode TODO XXX BUG
$self->_mtwrite($rec, $self->{offsets}[$n], length($oldrec));
$self->_oadjust([$n, 1, $rec]);
$self->{cache}->update($n, $rec);
}
sub _store_deferred {
my ($self, $n, $rec) = @_;
$self->{cache}->remove($n);
my $old_deferred = $self->{deferred}{$n};
if (defined $self->{deferred_max} && $n > $self->{deferred_max}) {
$self->{deferred_max} = $n;
}
$self->{deferred}{$n} = $rec;
my $len_diff = length($rec);
$len_diff -= length($old_deferred) if defined $old_deferred;
$self->{deferred_s} += $len_diff;
$self->{cache}->adj_limit(-$len_diff);
if ($self->{deferred_s} > $self->{dw_size}) {
$self->_flush;
} elsif ($self->_cache_too_full) {
$self->_cache_flush;
}
}
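# Accounting example: replacing a 10-byte deferred record with a 25-byte
# one makes $len_diff 15, so {deferred_s} grows by 15 and the read-cache
# limit shrinks by 15, keeping combined memory use within {memory}.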
# Remove a single record from the deferred-write buffer without writing it
# The record need not be present
sub _delete_deferred {
my ($self, $n) = @_;
my $rec = delete $self->{deferred}{$n};
return unless defined $rec;
if (defined $self->{deferred_max}
&& $n == $self->{deferred_max}) {
undef $self->{deferred_max};
}
$self->{deferred_s} -= length $rec;
$self->{cache}->adj_limit(length $rec);
}
sub FETCHSIZE {
my $self = shift;
my $n = $self->{eof} ? $#{$self->{offsets}} : $self->_fill_offsets;
my $top_deferred = $self->_defer_max;
$n = $top_deferred+1 if defined $top_deferred && $n < $top_deferred+1;
$n;
}
sub STORESIZE {
my ($self, $len) = @_;
if ($self->{autodefer}) {
$self->_annotate_ad_history('STORESIZE');
}
my $olen = $self->FETCHSIZE;
return if $len == $olen; # Woo-hoo!
# file gets longer
if ($len > $olen) {
if ($self->_is_deferring) {
for ($olen .. $len-1) {
$self->_store_deferred($_, $self->{recsep});
}
} else {
$self->_extend_file_to($len);
}
return;
}
# file gets shorter
if ($self->_is_deferring) {
# TODO maybe replace this with map-plus-assignment?
for (grep $_ >= $len, keys %{$self->{deferred}}) {
$self->_delete_deferred($_);
}
$self->{deferred_max} = $len-1;
}
$self->_seek($len);
$self->_chop_file;
$#{$self->{offsets}} = $len;
# $self->{offsets}[0] = 0; # in case we just chopped this
$self->{cache}->remove(grep $_ >= $len, $self->{cache}->ckeys);
}
### OPTIMIZE ME
### It should not be necessary to do FETCHSIZE
### Just seek to the end of the file.
sub PUSH {
my $self = shift;
$self->SPLICE($self->FETCHSIZE, scalar(@_), @_);
# No need to return:
# $self->FETCHSIZE; # because av.c takes care of this for me
}
sub POP {
my $self = shift;
my $size = $self->FETCHSIZE;
return if $size == 0;
# print STDERR "# POPPITY POP POP POP\n";
scalar $self->SPLICE($size-1, 1);
}
sub SHIFT {
my $self = shift;
scalar $self->SPLICE(0, 1);
}
sub UNSHIFT {
my $self = shift;
$self->SPLICE(0, 0, @_);
# $self->FETCHSIZE; # av.c takes care of this for me
}
sub CLEAR {
my $self = shift;
if ($self->{autodefer}) {
$self->_annotate_ad_history('CLEAR');
}
$self->_seekb(0);
$self->_chop_file;
$self->{cache}->set_limit($self->{memory});
$self->{cache}->empty;
@{$self->{offsets}} = (0);
%{$self->{deferred}}= ();
$self->{deferred_s} = 0;
$self->{deferred_max} = -1;
}
sub EXTEND {
my ($self, $n) = @_;
# No need to pre-extend anything in this case
return if $self->_is_deferring;
$self->_fill_offsets_to($n);
$self->_extend_file_to($n);
}
sub DELETE {
my ($self, $n) = @_;
if ($self->{autodefer}) {
$self->_annotate_ad_history('DELETE');
}
my $lastrec = $self->FETCHSIZE-1;
my $rec = $self->FETCH($n);
$self->_delete_deferred($n) if $self->_is_deferring;
if ($n == $lastrec) {
$self->_seek($n);
$self->_chop_file;
$#{$self->{offsets}}--;
$self->{cache}->remove($n);
# perhaps in this case I should also remove trailing null records?
# 20020316
# Note that delete @a[-3..-1] deletes the records in the wrong order,
# so we only chop the very last one out of the file. We could repair this
# by tracking deleted records inside the object.
} elsif ($n < $lastrec) {
$self->STORE($n, "");
}
$rec;
}
sub EXISTS {
my ($self, $n) = @_;
return 1 if exists $self->{deferred}{$n};
$n < $self->FETCHSIZE;
}
sub SPLICE {
my $self = shift;
if ($self->{autodefer}) {
$self->_annotate_ad_history('SPLICE');
}
$self->_flush if $self->_is_deferring; # move this up?
if (wantarray) {
$self->_chomp(my @a = $self->_splice(@_));
@a;
} else {
$self->_chomp1(scalar $self->_splice(@_));
}
}
sub DESTROY {
my $self = shift;
$self->flush if $self->_is_deferring;
$self->{cache}->delink if defined $self->{cache}; # break circular link
if ($self->{fh} and $self->{ourfh}) {
delete $self->{ourfh};
close delete $self->{fh};
}
}
sub _splice {
my ($self, $pos, $nrecs, @data) = @_;
my @result;
$pos = 0 unless defined $pos;
# Deal with negative and other out-of-range positions
# Also set default for $nrecs
{
my $oldsize = $self->FETCHSIZE;
$nrecs = $oldsize unless defined $nrecs;
my $oldpos = $pos;
if ($pos < 0) {
$pos += $oldsize;
if ($pos < 0) {
croak "Modification of non-creatable array value attempted, subscript $oldpos";
}
}
if ($pos > $oldsize) {
return unless @data;
$pos = $oldsize; # This is what perl does for normal arrays
}
# The manual is very unclear here
if ($nrecs < 0) {
$nrecs = $oldsize - $pos + $nrecs;
$nrecs = 0 if $nrecs < 0;
}
# nrecs is too big---it really means "until the end"
# 20030507
if ($nrecs + $pos > $oldsize) {
$nrecs = $oldsize - $pos;
}
}
$self->_fixrecs(@data);
my $data = join '', @data;
my $datalen = length $data;
my $oldlen = 0;
# compute length of data being removed
for ($pos .. $pos+$nrecs-1) {
last unless defined $self->_fill_offsets_to($_);
my $rec = $self->_fetch($_);
last unless defined $rec;
push @result, $rec;
# Why don't we just use length($rec) here?
# Because that record might have come from the cache. _splice
# might have been called to flush out the deferred-write records,
# and in this case length($rec) is the length of the record to be
# *written*, not the length of the actual record in the file. But
# the offsets are still true. 20020322
$oldlen += $self->{offsets}[$_+1] - $self->{offsets}[$_]
if defined $self->{offsets}[$_+1];
}
$self->_fill_offsets_to($pos+$nrecs);
# Modify the file
$self->_mtwrite($data, $self->{offsets}[$pos], $oldlen);
# Adjust the offsets table
$self->_oadjust([$pos, $nrecs, @data]);
{ # Take this read cache stuff out into a separate function
# You made a half-attempt to put it into _oadjust.
# Finish something like that up eventually.
# STORE also needs to do something similarish
# update the read cache, part 1
# modified records
for ($pos .. $pos+$nrecs-1) {
my $new = $data[$_-$pos];
if (defined $new) {
$self->{cache}->update($_, $new);
} else {
$self->{cache}->remove($_);
}
}
# update the read cache, part 2
# moved records - records past the site of the change
# need to be renumbered
# Maybe merge this with the previous block?
{
my @oldkeys = grep $_ >= $pos + $nrecs, $self->{cache}->ckeys;
my @newkeys = map $_-$nrecs+@data, @oldkeys;
$self->{cache}->rekey(\@oldkeys, \@newkeys);
}
# Now there might be too much data in the cache, if we spliced out
# some short records and spliced in some long ones. If so, flush
# the cache.
$self->_cache_flush;
}
# Yes, the return value of 'splice' *is* actually this complicated
wantarray ? @result : @result ? $result[-1] : undef;
}
# write data into the file
# $data is the data to be written.
# it should be written at position $pos, and should overwrite
# exactly $len of the following bytes.
# Note that if length($data) > $len, the subsequent bytes will have to
# be moved up, and if length($data) < $len, they will have to
# be moved down
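#
# Worked example: if the file holds "aaabbbccc",
#   $self->_twrite("XXXXX", 3, 3)
# overwrites the three bytes "bbb" with the five bytes "XXXXX" and moves
# the trailing "ccc" up by two bytes, leaving "aaaXXXXXccc".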
sub _twrite {
my ($self, $data, $pos, $len) = @_;
unless (defined $pos) {
die "\$pos was undefined in _twrite";
}
my $len_diff = length($data) - $len;
if ($len_diff == 0) { # Woo-hoo!
my $fh = $self->{fh};
$self->_seekb($pos);
$self->_write_record($data);
return; # well, that was easy.
}
# the two records are of different lengths
# our strategy here: rewrite the tail of the file,
# reading ahead one buffer at a time
# $bufsize is required to be at least as large as the data we're overwriting
my $bufsize = _bufsize($len_diff);
my ($writepos, $readpos) = ($pos, $pos+$len);
my $next_block;
my $more_data;
# Seems like there ought to be a way to avoid the repeated code
# and the special case here. The read(1) is also a little weird.
# Think about this.
do {
$self->_seekb($readpos);
my $br = read $self->{fh}, $next_block, $bufsize;
$more_data = read $self->{fh}, my($dummy), 1;
$self->_seekb($writepos);
$self->_write_record($data);
$readpos += $br;
$writepos += length $data;
$data = $next_block;
} while $more_data;
$self->_seekb($writepos);
$self->_write_record($next_block);
# There might be leftover data at the end of the file
$self->_chop_file if $len_diff < 0;
}
# _iwrite(D, S, E)
# Insert text D at position S.
# Let C = E-S-|D|. If C < 0, die.
# Data in [S,S+C) is copied to [S+D,S+D+C) = [S+D,E).
# Data in [S+C = E-D, E) is returned. Data in [E, oo) is untouched.
#
# In a later version, don't read the entire intervening area into
# memory at once; do the copying block by block.
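#
# Worked example: with file contents "0123456789",
#   $self->_iwrite("ab", 2, 6)
# gives D="ab", so C = 6-2-2 = 2; bytes "23" are copied to [4,6), the
# file becomes "01ab236789", and the displaced "45" is returned.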
sub _iwrite {
my $self = shift;
my ($D, $s, $e) = @_;
my $d = length $D;
my $c = $e-$s-$d;
local *FH = $self->{fh};
confess "Not enough space to insert $d bytes between $s and $e"
if $c < 0;
confess "[$s,$e) is an invalid insertion range" if $e < $s;
$self->_seekb($s);
read FH, my $buf, $e-$s;
$D .= substr($buf, 0, $c, "");
$self->_seekb($s);
$self->_write_record($D);
return $buf;
}
# Like _twrite, but the data-pos-len triple may be repeated; you may
# write several chunks. All the writing will be done in
# one pass. Chunks SHALL be in ascending order and SHALL NOT overlap.
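# Calling convention, for illustration (byte positions hypothetical):
#   $self->_mtwrite("AAA", 10, 5,    # replace bytes [10,15) with "AAA"
#                   "BBBB", 30, 2);  # replace bytes [30,32) with "BBBB"
# The two chunks are in ascending order and do not overlap, as required.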
sub _mtwrite {
my $self = shift;
my $unwritten = "";
my $delta = 0;
@_ % 3 == 0
or die "Arguments to _mtwrite did not come in groups of three";
while (@_) {
my ($data, $pos, $len) = splice @_, 0, 3;
my $end = $pos + $len; # The OLD end of the segment to be replaced
$data = $unwritten . $data;
$delta -= length($unwritten);
$unwritten = "";
$pos += $delta; # This is where the data goes now
my $dlen = length $data;
$self->_seekb($pos);
if ($len >= $dlen) { # the data will fit
$self->_write_record($data);
$delta += ($dlen - $len); # everything following moves down by this much
$data = ""; # All the data in the buffer has been written
} else { # won't fit
my $writable = substr($data, 0, $len - $delta, "");
$self->_write_record($writable);
$delta += ($dlen - $len); # everything following moves down by this much
}
# At this point we've written some but maybe not all of the data.
# There might be a gap to close up, or $data might still contain a
# bunch of unwritten data that didn't fit.
my $ndlen = length $data;
if ($delta == 0) {
$self->_write_record($data);
} elsif ($delta < 0) {
# upcopy (close up gap)
if (@_) {
$self->_upcopy($end, $end + $delta, $_[1] - $end);
} else {
$self->_upcopy($end, $end + $delta);
}
} else {
# downcopy (insert data that didn't fit; replace this data in memory
# with _later_ data that doesn't fit)
if (@_) {
$unwritten = $self->_downcopy($data, $end, $_[1] - $end);
} else {
# Make the file longer to accommodate the last segment that doesn't fit
$unwritten = $self->_downcopy($data, $end);
}
}
}
}
# Copy block of data of length $len from position $spos to position $dpos
# $dpos must be <= $spos
#
# If $len is undefined, go all the way to the end of the file
# and then truncate it ($spos - $dpos bytes will be removed)
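#
# E.g., $self->_upcopy(100, 90) slides everything from byte 100 onward
# down to byte 90, then truncates, removing the ten bytes in between.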
sub _upcopy {
my $blocksize = 8192;
my ($self, $spos, $dpos, $len) = @_;
if ($dpos > $spos) {
die "source ($spos) was upstream of destination ($dpos) in _upcopy";
} elsif ($dpos == $spos) {
return;
}
while (! defined ($len) || $len > 0) {
my $readsize = ! defined($len) ? $blocksize
: $len > $blocksize ? $blocksize
: $len;
my $fh = $self->{fh};
$self->_seekb($spos);
my $bytes_read = read $fh, my($data), $readsize;
$self->_seekb($dpos);
if ($data eq "") {
$self->_chop_file;
last;
}
$self->_write_record($data);
$spos += $bytes_read;
$dpos += $bytes_read;
$len -= $bytes_read if defined $len;
}
}
# Write $data into a block of length $len at position $pos,
# moving everything in the block forwards to make room.
# Instead of writing the last length($data) bytes from the block
# (because there isn't room for them any longer) return them.
#
# Undefined $len means 'until the end of the file'
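#
# E.g., $self->_downcopy("XY", 5) inserts "XY" at byte 5, slides the
# rest of the file forward two bytes, and returns "" (with a defined
# $len, the bytes pushed past the end of the block would be returned).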
sub _downcopy {
my $blocksize = 8192;
my ($self, $data, $pos, $len) = @_;
my $fh = $self->{fh};
while (! defined $len || $len > 0) {
my $readsize = ! defined($len) ? $blocksize
: $len > $blocksize? $blocksize : $len;
$self->_seekb($pos);
read $fh, my($old), $readsize;
my $last_read_was_short = length($old) < $readsize;
$data .= $old;
my $writable;
if ($last_read_was_short) {
# If last read was short, then $data now contains the entire rest
# of the file, so there's no need to write only one block of it
$writable = $data;
$data = "";
} else {
$writable = substr($data, 0, $readsize, "");
}
last if $writable eq "";
$self->_seekb($pos);
$self->_write_record($writable);
last if $last_read_was_short && $data eq "";
$len -= $readsize if defined $len;
$pos += $readsize;
}
return $data;
}
# Adjust the object data structures following an '_mtwrite'
# Arguments are
#  [$pos, $nrecs, @records] items
# indicating that $nrecs records were removed starting at record number
# $pos and replaced with the records in @records (only the lengths of
# the replacement records are used here).
# Arguments guarantee that $pos is strictly increasing.
# No return value
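#
# E.g., after an _mtwrite that replaced two records starting at record 5
# with the records "foo\n" and "quux\n":
#   $self->_oadjust([5, 2, "foo\n", "quux\n"]);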
sub _oadjust {
my $self = shift;
my $delta = 0;
my $delta_recs = 0;
my $prev_end = -1;
my %newkeys;
for (@_) {
my ($pos, $nrecs, @data) = @$_;
$pos += $delta_recs;
# Adjust the offsets of the records after the previous batch up
# to the first new one of this batch
for my $i ($prev_end+2 .. $pos - 1) {
$self->{offsets}[$i] += $delta;
      $newkeys{$i} = $i + $delta_recs;
}
$prev_end = $pos + @data - 1; # last record moved on this pass
# Remove the offsets for the removed records;
# replace with the offsets for the inserted records
my @newoff = ($self->{offsets}[$pos] + $delta);
for my $i (0 .. $#data) {
my $newlen = length $data[$i];
push @newoff, $newoff[$i] + $newlen;
$delta += $newlen;
}
for my $i ($pos .. $pos+$nrecs-1) {
last if $i+1 > $#{$self->{offsets}};
my $oldlen = $self->{offsets}[$i+1] - $self->{offsets}[$i];
$delta -= $oldlen;
}
# # also this data has changed, so update it in the cache
# for (0 .. $#data) {
# $self->{cache}->update($pos + $_, $data[$_]);
# }
# if ($delta_recs) {
# my @oldkeys = grep $_ >= $pos + @data, $self->{cache}->ckeys;
# my @newkeys = map $_ + $delta_recs, @oldkeys;
# $self->{cache}->rekey(\@oldkeys, \@newkeys);
# }
# replace old offsets with new
splice @{$self->{offsets}}, $pos, $nrecs+1, @newoff;
# What if we just spliced out the end of the offsets table?
# shouldn't we clear $self->{eof}? Test for this XXX BUG TODO
$delta_recs += @data - $nrecs; # net change in total number of records
}
# The trailing records at the very end of the file
if ($delta) {
for my $i ($prev_end+2 .. $#{$self->{offsets}}) {
$self->{offsets}[$i] += $delta;
}
}
# If we scrubbed out all known offsets, regenerate the trivial table
# that knows that the file does indeed start at 0.
$self->{offsets}[0] = 0 unless @{$self->{offsets}};
# If the file got longer, the offsets table is no longer complete
# $self->{eof} = 0 if $delta_recs > 0;
# Now there might be too much data in the cache, if we spliced out
# some short records and spliced in some long ones. If so, flush
# the cache.
$self->_cache_flush;
}
# If a record does not already end with the appropriate terminator
# string, append one.
sub _fixrecs {
my $self = shift;
for (@_) {
$_ = "" unless defined $_;
$_ .= $self->{recsep}
unless substr($_, - $self->{recseplen}) eq $self->{recsep};
}
}
################################################################
#
# Basic read, write, and seek
#
# seek to the beginning of record #$n
# Assumes that the offsets table is already correctly populated
#
# Note that $n=-1 has a special meaning here: It means the start of
# the last known record; this may or may not be the very last record
# in the file, depending on whether the offsets table is fully populated.
#
sub _seek {
my ($self, $n) = @_;
my $o = $self->{offsets}[$n];
defined($o)
or confess("logic error: undefined offset for record $n");
seek $self->{fh}, $o, SEEK_SET
or confess "Couldn't seek filehandle: $!"; # "Should never happen."
}
# seek to byte $b in the file
sub _seekb {
my ($self, $b) = @_;
seek $self->{fh}, $b, SEEK_SET
or die "Couldn't seek filehandle: $!"; # "Should never happen."
}
# populate the offsets table up to the beginning of record $n
# return the offset of record $n
sub _fill_offsets_to {
my ($self, $n) = @_;
return $self->{offsets}[$n] if $self->{eof};
my $fh = $self->{fh};
local *OFF = $self->{offsets};
my $rec;
until ($#OFF >= $n) {
$self->_seek(-1); # tricky -- see comment at _seek
$rec = $self->_read_record;
if (defined $rec) {
push @OFF, int(tell $fh); # Tels says that int() saves memory here
} else {
$self->{eof} = 1;
return; # It turns out there is no such record
}
}
# we have now read all the records up to record n-1,
# so we can return the offset of record n
$OFF[$n];
}
sub _fill_offsets {
my ($self) = @_;
my $fh = $self->{fh};
local *OFF = $self->{offsets};
$self->_seek(-1); # tricky -- see comment at _seek
# Tels says that inlining read_record() would make this loop
# five times faster. 20030508
while ( defined $self->_read_record()) {
# int() saves us memory here
push @OFF, int(tell $fh);
}
$self->{eof} = 1;
$#OFF;
}
# assumes that $rec is already suitably terminated
sub _write_record {
my ($self, $rec) = @_;
my $fh = $self->{fh};
local $\ = "";
print $fh $rec
or die "Couldn't write record: $!"; # "Should never happen."
# $self->{_written} += length($rec);
}
sub _read_record {
my $self = shift;
my $rec;
{ local $/ = $self->{recsep};
my $fh = $self->{fh};
$rec = <$fh>;
}
return unless defined $rec;
if (substr($rec, -$self->{recseplen}) ne $self->{recsep}) {
# improperly terminated final record --- quietly fix it.
# my $ac = substr($rec, -$self->{recseplen});
# $ac =~ s/\n/\\n/g;
$self->{sawlastrec} = 1;
unless ($self->{rdonly}) {
local $\ = "";
my $fh = $self->{fh};
print $fh $self->{recsep};
}
$rec .= $self->{recsep};
}
# $self->{_read} += length($rec) if defined $rec;
$rec;
}
sub _rw_stats {
my $self = shift;
@{$self}{'_read', '_written'};
}
################################################################
#
# Read cache management
sub _cache_flush {
my ($self) = @_;
$self->{cache}->reduce_size_to($self->{memory} - $self->{deferred_s});
}
sub _cache_too_full {
my $self = shift;
$self->{cache}->bytes + $self->{deferred_s} >= $self->{memory};
}
################################################################
#
# File custodial services
#
# We have read to the end of the file and have the offsets table
# entirely populated. Now we need to write a new record beyond
# the end of the file. We prepare for this by writing
# empty records into the file up to the position we want
#
# assumes that the offsets table already contains the offset of record $n,
# if it exists, and extends to the end of the file if not.
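#
# E.g., with three records the offsets table is [o0, o1, o2, o3] (o3 is
# the end-of-file position); _extend_file_to(5) writes two empty records
# and pushes two more offsets, giving five records.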
sub _extend_file_to {
my ($self, $n) = @_;
$self->_seek(-1); # position after the end of the last record
my $pos = $self->{offsets}[-1];
# the offsets table has one entry more than the total number of records
my $extras = $n - $#{$self->{offsets}};
# Todo : just use $self->{recsep} x $extras here?
while ($extras-- > 0) {
$self->_write_record($self->{recsep});
push @{$self->{offsets}}, int(tell $self->{fh});
}
}
# Truncate the file at the current position
sub _chop_file {
my $self = shift;
truncate $self->{fh}, tell($self->{fh});
}
# compute the size of a buffer suitable for moving
# all the data in a file forward $n bytes
# ($n may be negative)
# The result should be at least $n.
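#
# E.g., _bufsize(10000) returns 16384: 10000 & ~8191 is 8192, and the
# nonzero remainder (10000 & 8191 == 1808) adds one more 8192-byte block.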
sub _bufsize {
my $n = shift;
return 8192 if $n <= 0;
my $b = $n & ~8191;
$b += 8192 if $n & 8191;
$b;
}
################################################################
#
# Miscellaneous public methods
#
# Lock the file
sub flock {
my ($self, $op) = @_;
unless (@_ <= 3) {
my $pack = ref $self;
croak "Usage: $pack\->flock([OPERATION])";
}
my $fh = $self->{fh};
$op = LOCK_EX unless defined $op;
my $locked = flock $fh, $op;
if ($locked && ($op & (LOCK_EX | LOCK_SH))) {
# If you're locking the file, then presumably it's because
# there might have been a write access by another process.
# In that case, the read cache contents and the offsets table
# might be invalid, so discard them. 20030508
$self->{offsets} = [0];
$self->{cache}->empty;
}
$locked;
}
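# Usage sketch (LOCK_UN must be imported from Fcntl by the caller):
#   my $o = tied @lines;
#   $o->flock;            # LOCK_EX by default
#   push @lines, "a new record";
#   $o->flock(LOCK_UN);   # release the lock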
# Get/set autochomp option
sub autochomp {
my $self = shift;
if (@_) {
my $old = $self->{autochomp};
$self->{autochomp} = shift;
$old;
} else {
$self->{autochomp};
}
}
# Get offset table entries; returns offset of nth record
sub offset {
my ($self, $n) = @_;
if ($#{$self->{offsets}} < $n) {
return if $self->{eof}; # request for record beyond the end of file
my $o = $self->_fill_offsets_to($n);
# If it's still undefined, there is no such record, so return 'undef'
return unless defined $o;
}
$self->{offsets}[$n];
}
sub discard_offsets {
my $self = shift;
$self->{offsets} = [0];
}
################################################################
#
# Matters related to deferred writing
#
# Defer writes
sub defer {
my $self = shift;
$self->_stop_autodeferring;
@{$self->{ad_history}} = ();
$self->{defer} = 1;
}
# Flush deferred writes
#
# This could be better optimized to write the file in one pass, instead
# of one pass per block of records. But that will require modifications
# to _twrite, so I should have a good _twrite test suite first.
sub flush {
my $self = shift;
$self->_flush;
$self->{defer} = 0;
}
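# Usage sketch for explicit deferral:
#   my $o = tied @lines;
#   $o->defer;                                # buffer writes in memory
#   $lines[$_] = "record $_" for 0 .. 999;
#   $o->flush;                                # write them in one pass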
sub _old_flush {
my $self = shift;
my @writable = sort {$a<=>$b} (keys %{$self->{deferred}});
while (@writable) {
# gather all consecutive records from the front of @writable
my $first_rec = shift @writable;
my $last_rec = $first_rec+1;
++$last_rec, shift @writable while @writable && $last_rec == $writable[0];
--$last_rec;
$self->_fill_offsets_to($last_rec);
$self->_extend_file_to($last_rec);
$self->_splice($first_rec, $last_rec-$first_rec+1,
@{$self->{deferred}}{$first_rec .. $last_rec});
}
  $self->_discard;               # clear out the deferred-write buffer
}
sub _flush {
my $self = shift;
my @writable = sort {$a<=>$b} (keys %{$self->{deferred}});
my @args;
my @adjust;
while (@writable) {
# gather all consecutive records from the front of @writable
my $first_rec = shift @writable;
my $last_rec = $first_rec+1;
++$last_rec, shift @writable while @writable && $last_rec == $writable[0];
--$last_rec;
my $end = $self->_fill_offsets_to($last_rec+1);
if (not defined $end) {
$self->_extend_file_to($last_rec);
$end = $self->{offsets}[$last_rec];
}
my ($start) = $self->{offsets}[$first_rec];
push @args,
join("", @{$self->{deferred}}{$first_rec .. $last_rec}), # data
$start, # position
$end-$start; # length
push @adjust, [$first_rec, # starting at this position...
$last_rec-$first_rec+1, # this many records...
# are replaced with these...
@{$self->{deferred}}{$first_rec .. $last_rec},
];
}
$self->_mtwrite(@args); # write multiple record groups
  $self->_discard;               # clear out the deferred-write buffer
$self->_oadjust(@adjust);
}
# Discard deferred writes and disable future deferred writes
sub discard {
my $self = shift;
$self->_discard;
$self->{defer} = 0;
}
# Discard deferred writes, but retain old deferred writing mode
sub _discard {
my $self = shift;
%{$self->{deferred}} = ();
$self->{deferred_s} = 0;
$self->{deferred_max} = -1;
$self->{cache}->set_limit($self->{memory});
}
# Deferred writing is enabled, either explicitly ($self->{defer})
# or automatically ($self->{autodeferring})
sub _is_deferring {
my $self = shift;
$self->{defer} || $self->{autodeferring};
}
# The largest record number of any deferred record
sub _defer_max {
my $self = shift;
return $self->{deferred_max} if defined $self->{deferred_max};
my $max = -1;
for my $key (keys %{$self->{deferred}}) {
$max = $key if $key > $max;
}
$self->{deferred_max} = $max;
$max;
}
################################################################
#
# Matters related to autodeferment
#
# Get/set autodefer option
sub autodefer {
my $self = shift;
if (@_) {
my $old = $self->{autodefer};
$self->{autodefer} = shift;
if ($old) {
$self->_stop_autodeferring;
@{$self->{ad_history}} = ();
}
$old;
} else {
$self->{autodefer};
}
}
# The user is trying to store record #$n. Record that in the history,
# and then enable (or disable) autodeferment if that seems useful.
# Note that it's OK for $n to be a non-number, as long as the function
# is prepared to deal with that. Nobody else looks at the ad_history.
#
# Now, what does the ad_history mean, and what is this function doing?
# Essentially, the idea is to enable autodeferring when we see that the
# user has made three consecutive STORE calls to three consecutive records.
# ("Three" is actually ->{autodefer_threshhold}.)
# A STORE call for record #$n inserts $n into the autodefer history,
# and if the history contains three consecutive records, we enable
# autodeferment. An ad_history of [X, Y] means that the most recent
# STOREs were for records X, X+1, ..., Y, in that order.
#
# Inserting a nonconsecutive number erases the history and starts over.
#
# Performing a special operation like SPLICE erases the history.
#
# There's one special case: CLEAR means that CLEAR was just called.
# In this case, we prime the history with [-2, -1] so that if the next
# write is for record 0, autodeferring goes on immediately. This is for
# the common special case of "@a = (...)".
#
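# Worked example (assuming the file is long enough to pass the
# autodefer_filelen_threshhold test below): STOREs to records 7, 8, 9
# take ad_history from [7,7] to [7,8] to [7,9]; at the default
# threshhold of 3 the third STORE enables autodeferring. A later STORE
# to record 20 resets the history to [20,20] and disables it again.
#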
sub _annotate_ad_history {
my ($self, $n) = @_;
return unless $self->{autodefer}; # feature is disabled
return if $self->{defer}; # already in explicit defer mode
return unless $self->{offsets}[-1] >= $self->{autodefer_filelen_threshhold};
local *H = $self->{ad_history};
if ($n eq 'CLEAR') {
@H = (-2, -1); # prime the history with fake records
$self->_stop_autodeferring;
} elsif ($n =~ /^\d+$/) {
if (@H == 0) {
@H = ($n, $n);
} else { # @H == 2
if ($H[1] == $n-1) { # another consecutive record
$H[1]++;
if ($H[1] - $H[0] + 1 >= $self->{autodefer_threshhold}) {
$self->{autodeferring} = 1;
}
      } else {                  # nonconsecutive -- erase and start over
@H = ($n, $n);
$self->_stop_autodeferring;
}
}
} else { # SPLICE or STORESIZE or some such
@H = ();
$self->_stop_autodeferring;
}
}
# If autodeferring was enabled, cut it out and discard the history
sub _stop_autodeferring {
my $self = shift;
if ($self->{autodeferring}) {
$self->_flush;
}
$self->{autodeferring} = 0;
}
################################################################
# This is NOT a method. It is here for two reasons:
# 1. To factor a fairly complicated block out of the constructor
# 2. To provide access for the test suite, which needs to be sure
# files are being written properly.
sub _default_recsep {
my $recsep = $/;
if ($^O eq 'MSWin32') { # Dos too?
# Windows users expect files to be terminated with \r\n
# But $/ is set to \n instead
# Note that this also transforms \n\n into \r\n\r\n.
# That is a feature.
$recsep =~ s/\n/\r\n/g;
}
$recsep;
}
# Utility function for _check_integrity
sub _ci_warn {
my $msg = shift;
$msg =~ s/\n/\\n/g;
$msg =~ s/\r/\\r/g;
print "# $msg\n";
}
# Given a file, make sure the cache is consistent with the
# file contents and the internal data structures are consistent with
# each other. Returns true if everything checks out, false if not
#
# The $file argument is no longer used. It is retained for compatibility
# with the existing test suite.
sub _check_integrity {
my ($self, $file, $warn) = @_;
my $rsl = $self->{recseplen};
my $rs = $self->{recsep};
my $good = 1;
local *_; # local $_ does not work here
local $DIAGNOSTIC = 1;
if (not defined $rs) {
_ci_warn("recsep is undef!");
$good = 0;
} elsif ($rs eq "") {
_ci_warn("recsep is empty!");
$good = 0;
} elsif ($rsl != length $rs) {
my $ln = length $rs;
_ci_warn("recsep <$rs> has length $ln, should be $rsl");
$good = 0;
}
if (not defined $self->{offsets}[0]) {
_ci_warn("offset 0 is missing!");
$good = 0;
} elsif ($self->{offsets}[0] != 0) {
_ci_warn("rec 0: offset <$self->{offsets}[0]> s/b 0!");
$good = 0;
}
my $cached = 0;
{
local *F = $self->{fh};
seek F, 0, SEEK_SET;
local $. = 0;
local $/ = $rs;
while (