% In its present form, reading certain large chunked datasets
% consumes a tremendous amount of memory, even though it is as fast
% as it should be when the dataset is not chunked or when the load()
% function is used instead.
% This has to be fixed quickly, but I don't understand it yet;
% I don't see much of a difference in what the two code paths do.
%
% Maybe somebody else can confirm and/or comment?
%
pkg load hdf5oct;
%a = rand(230,40,40,70); % let's call this case x)
a = rand(230,43,43,71);  % case y)
s = size(a);
s(1) = 1;   % chunk size: a single slice along the first dimension
h5create("testchunk.h5","/foo_chunked",size(a),'ChunkSize',s)
h5write("testchunk.h5","/foo_chunked",a)
h5create("testchunk.h5","/foo_unchunked",size(a))
h5write("testchunk.h5","/foo_unchunked",a)
disp("load:")
%str = load("-hdf5","testchunk.h5");  % okay for both x) and y)
%clear str;
disp("read unchunked:")
%b = h5read("testchunk.h5","/foo_unchunked");  % okay for both x) and y)
disp("read chunked:")
b = h5read("testchunk.h5","/foo_chunked");  % okay for x), catastrophic for y)