This module will ruin your data and shred your shared memory because:
Perl operations on tied variables are NOT atomic. You CANNOT do $x++ safely, because that is two separate calls into the TIE mechanism - a read (FETCH), then a write (STORE). Nothing prevents another read or write from happening between those two operations (see the short demo after this list).
This module is unfinished (maybe the author realised the problem and gave up?) - it half-implements two different ways to use shared memory, and using either one breaks both (it corrupts the memory and croaks).
This module seems to be unmaintained - no issues are being addressed.
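To see the non-atomicity for yourself, tie a scalar to a class that logs every access (a minimal sketch; the "TraceTie" package name is mine, not part of the module): a single $x++ turns into one FETCH followed by one STORE, and nothing stops another process or thread from touching the shared value in the gap between them.

package TraceTie;
sub TIESCALAR { my $v = 0; return bless \$v, shift }
sub FETCH     { my $self = shift; print "FETCH -> $$self\n"; return $$self }
sub STORE     { my ($self, $val) = @_; print "STORE <- $val\n"; $$self = $val }

package main;
tie my $x, 'TraceTie';
$x++;   # prints "FETCH -> 0" then "STORE <- 1" - two separate, interruptible steps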
Below is a working alternative that provides a correct mechanism for sharing data among threads and processes, with a test at the end to check correctness.
use strict;
use warnings;
#use Fcntl ':flock'; # would import the LOCK_* constants
my $LOCK_EX = 2; # hard-coded so we can skip the Fcntl import; unimported LOCK_* barewords would need "no strict 'subs';"
my $LOCK_UN = 8;
use JSON::XS;
my $json = JSON::XS->new->canonical; # canonical keeps key order stable in the file
my $file_path = '/tmp/ramdisk/tiehash.dat';
sub shread { # read a key
    my ($key) = @_;
    my ($data, $fh) = _load_from_file();
    return $data->{$key};
}
sub shupdate { # add/overwrite a key, or (with $inc true) increment it
    my ($key, $val, $inc) = @_;
    my ($data, $fh) = _load_from_file(1); # stay locked: the read-modify-write must be atomic
    # Update the value for the key
    if ($inc) {
        $data->{$key} = ($data->{$key} // 0) + $val;
    } else {
        $data->{$key} = $val;
    }
    my $ret = $data->{$key};
    _save_to_file($data, $fh); # writes, unlocks, and closes
    return $ret;
}
sub shreset { # clear (not an atomic operation - take care!)
    open my $fh, '>', $file_path or die "$$ Cannot open $file_path: $!";
    print $fh '{}';
    close $fh or die "$$ Cannot close: $!";
}
sub _load_from_file {
    my ($staylocked) = @_;
    open my $fh, '+<', $file_path or die "$$ Cannot open $file_path: $!";
    my $data = {};
    flock($fh, $LOCK_EX) or die "$$ Cannot lock: $!";
    my $json_text = do { local $/; <$fh> };
    $data = $json->decode($json_text) if $json_text;
    unless ($staylocked) { # caller only wants to read: unlock and close now
        flock($fh, $LOCK_UN) or die "$$ Cannot unlock: $!";
        close $fh or die "$$ Cannot close: $!";
    }
    return ($data, $fh);
}
sub _save_to_file {
    my ($data, $fh) = @_;
    unless ($fh) {
        open $fh, '+<', $file_path or die "$$ Cannot open $file_path: $!";
    }
    flock($fh, $LOCK_EX) or die "$$ Cannot lock: $!"; # exclusive lock for writing (a no-op if we already hold it)
    truncate($fh, 0) or die "$$ Cannot truncate file: $!";
    seek($fh, 0, 0) or die "$$ Cannot seek: $!";
    print $fh $json->encode($data);
    flock($fh, $LOCK_UN) or die "$$ Cannot unlock: $!";
    close $fh or die "$$ Cannot close: $!";
}
my $first = 0; # set if this process ever saw the counter at zero (i.e. it ran first)
# Perform 100 read/write operations
&shupdate('last', "I am $$");
&shupdate("me $$", &shread('foo'));
for my $i (1..100) {
    my $value = &shread('foo');
    if (($value // 0) == 0) { # '// 0' avoids an undef warning on the very first run
        $first = 1;
    }
    &shupdate('foo', 1, 1); # atomically increment $hash{foo} and store it
}
sleep(2); # so we can spawn 400 other processes at once and have them all running together
# Print final value of foo
print "$$ Final value of foo: " . &shread('foo') . "\n";
if ($first) {
    sleep(10); # give the other processes time to finish
    print "\x1b[32;1m$$ Last before ending: foo=" . &shread('foo') . "\x1b[0m\n"; # prints: Last before ending: foo=40000
}
# Test thusly (create $file_path containing {} first - e.g. run shreset once - or the '+<' opens will die):
# for FN in {1..400};do perl perlshared.pl & done
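# Check the result afterwards (a quick sketch, assuming the same file path as above;
# should print 40000 = 400 processes x 100 increments each):
# perl -MJSON::XS -e 'open my $fh, "<", "/tmp/ramdisk/tiehash.dat" or die $!; local $/; print decode_json(<$fh>)->{foo}, "\n"'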