fsspec / s3fs

S3 Filesystem
http://s3fs.readthedocs.io/en/latest/

AttributeError: 'S3FileSystem' object has no attribute '_loop' #629

Open meteoDaniel opened 2 years ago

meteoDaniel commented 2 years ago

So here is my code to store an xarray.Dataset as zarr in an S3 bucket:


# imports assumed by this snippet; check_aws_env_vars and create_bucket are project helpers
from typing import Dict, List, Optional

import s3fs
import xarray
import zarr


def store_xarray_as_zarr(
        dataset: xarray.Dataset,
        bucket_name: str,
        dataset_name: str,
        data_type_mapping: Dict[str, str],
        append_dim: str = "dt_calc",
        time_dimensions: Optional[List[str]] = None,
        storage_class: str = "GLACIER_IR",
) -> None:
    """
    Takes xarray.Dataset object and stores it in the given s3 bucket

    Args:
        dataset: xarray.Dataset objects contains the data
        bucket_name: name of the bucket on s3
        dataset_name: refined name of the dataset, can be a path too!
        data_type_mapping: is the final mapping to the dtype for the given data
        append_dim: dimension to append data along zarr archive
        time_dimensions: all time dimensions will be stored as float64 and have to be encoded additionally
        storage_class: 'STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|
                       'GLACIER'|'DEEP_ARCHIVE'|'OUTPOSTS'|'GLACIER_IR'

    Returns:
        None, stores data in s3
    """
    if time_dimensions is None:
        time_dimensions = ["dt_calc", "dt_fore"]
    check_aws_env_vars()

    s3_out = s3fs.S3FileSystem(
        anon=False, s3_additional_kwargs={"StorageClass": storage_class}
    )
    store_out = s3fs.S3Map(
        root=f"s3:///{bucket_name}/{dataset_name}.zarr", s3=s3_out, check=False
    )

    try:
        create_bucket(bucket_name)
        compressor = zarr.Blosc(cname="zstd", clevel=6, shuffle=2)
        encoding = {
            key: {"dtype": data_type_mapping[key], "compressor": compressor}
            for key in list(dataset.keys())
            if key not in time_dimensions
        }
        for time_dimension in time_dimensions:
            encoding.update({time_dimension: {"dtype": "float64"}})
        dataset.to_zarr(
            store_out, mode="w-", encoding=encoding, compute=True, consolidated=True
        )
    except zarr.errors.ContainsGroupError:
        dataset.to_zarr(
            store_out, mode="a", append_dim=append_dim, compute=True, consolidated=True
        )

Actually, the script fails at this point:

        dataset.to_zarr(
            store_out, mode="w-", encoding=encoding, compute=True, consolidated=True
        )

With this error message:


---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
/app/src/radar_data/german_radar_data_download.py in <module>
----> 1 process_radoland_data_upload(datetime(2022, 1, 1), datetime(2022, 2, 1))

/app/src/radar_data/german_radar_data_download.py in process_radoland_data_upload(dt_start, dt_end)
    113                 DwdRadarParameter.RADOLAN_CDC
    114             )
--> 115             store_xarray_as_zarr(
    116                 ds,
    117                 'alitiq-radolan-hourly',

/usr/local/lib/python3.9/site-packages/alitiq_db/utils/aws/s3.py in store_xarray_as_zarr(dataset, bucket_name, dataset_name, data_type_mapping, append_dim, time_dimensions, storage_class)
    255         for time_dimension in time_dimensions:
    256             encoding.update({time_dimension: {"dtype": "float64"}})
--> 257         dataset.to_zarr(
    258             store_out, mode="w-", encoding=encoding, compute=True, consolidated=True
    259         )

/usr/local/lib/python3.9/site-packages/xarray/core/dataset.py in to_zarr(self, store, chunk_store, mode, synchronizer, group, encoding, compute, consolidated, append_dim, region, safe_chunks, storage_options)
   2034             encoding = {}
   2035 
-> 2036         return to_zarr(
   2037             self,
   2038             store=store,

/usr/local/lib/python3.9/site-packages/xarray/backends/api.py in to_zarr(dataset, store, chunk_store, mode, synchronizer, group, encoding, compute, consolidated, append_dim, region, safe_chunks, storage_options)
   1389         already_consolidated = False
   1390         consolidate_on_close = consolidated or consolidated is None
-> 1391     zstore = backends.ZarrStore.open_group(
   1392         store=mapper,
   1393         mode=mode,

/usr/local/lib/python3.9/site-packages/xarray/backends/zarr.py in open_group(cls, store, mode, synchronizer, group, consolidated, consolidate_on_close, chunk_store, storage_options, append_dim, write_region, safe_chunks, stacklevel)
    368             zarr_group = zarr.open_consolidated(store, **open_kwargs)
    369         else:
--> 370             zarr_group = zarr.open_group(store, **open_kwargs)
    371         return cls(
    372             zarr_group,

/usr/local/lib/python3.9/site-packages/zarr/hierarchy.py in open_group(store, mode, cache_attrs, synchronizer, path, chunk_store, storage_options)
   1192 
   1193     elif mode in ['w-', 'x']:
-> 1194         if contains_array(store, path=path):
   1195             raise ContainsArrayError(path)
   1196         elif contains_group(store, path=path):

/usr/local/lib/python3.9/site-packages/zarr/storage.py in contains_array(store, path)
     94     prefix = _path_to_prefix(path)
     95     key = prefix + array_meta_key
---> 96     return key in store
     97 
     98 

/usr/local/lib/python3.9/_collections_abc.py in __contains__(self, key)
    682     def __contains__(self, key):
    683         try:
--> 684             self[key]
    685         except KeyError:
    686             return False

/usr/local/lib/python3.9/site-packages/zarr/storage.py in __getitem__(self, key)
    543 
    544     def __getitem__(self, key):
--> 545         return self._mutable_mapping[key]
    546 
    547     def __setitem__(self, key, value):

/usr/local/lib/python3.9/site-packages/fsspec/mapping.py in __getitem__(self, key, default)
    135         k = self._key_to_str(key)
    136         try:
--> 137             result = self.fs.cat(k)
    138         except self.missing_exceptions:
    139             if default is not None:

/usr/local/lib/python3.9/site-packages/fsspec/asyn.py in wrapper(*args, **kwargs)
     84     def wrapper(*args, **kwargs):
     85         self = obj or args[0]
---> 86         return sync(self.loop, func, *args, **kwargs)
     87 
     88     return wrapper

/usr/local/lib/python3.9/site-packages/fsspec/asyn.py in loop(self)
    299         if self._pid != os.getpid():
    300             raise RuntimeError("This class is not fork-safe")
--> 301         return self._loop
    302 
    303     async def _rm_file(self, path, **kwargs):

AttributeError: 'S3FileSystem' object has no attribute '_loop'

Unfortunately my test runs fine, so it is difficult to find out what the issue is:

    ds = xarray.Dataset(
        {'temp': (('dt_calc', 'y', 'x'), np.array([[[1., 2., 3., 4.], [3., 4., 5., 6.]]]))},
        coords={'lon': ('y', np.array([50., 51.])), 'lat': ('x', np.array([4., 5., 6., 7.])),
                'dt_calc': ('dt_calc', [datetime(2022, 1, 1)])}
    )
    ds_2 = xarray.Dataset(
        {'temp': (('dt_calc', 'y', 'x'), np.array([[[1., 2., 3., 4.], [3., 4., 5., 6.]]]))},
        coords={'lon': ('y', np.array([50., 51.])), 'lat': ('x', np.array([4., 5., 6., 7.])),
                'dt_calc': ('dt_calc', [datetime(2022, 1, 1, 1)])}
    )
    store_xarray_as_zarr(ds, 'alitiq-test-bucket-db', '202206/test', {'temp': ds.temp.dtype},
                         time_dimensions=['dt_calc'])
    store_xarray_as_zarr(ds_2, 'alitiq-test-bucket-db', '202206/test', {'temp': ds.temp.dtype},
                         time_dimensions=['dt_calc'])

This is the head of my dataset:


<xarray.Dataset>
Dimensions:        (time: 744, y: 900, x: 900)
Coordinates:
    lat            (y, x) float64 46.95 46.95 46.95 46.96 ... 54.73 54.73 54.73
    lon            (y, x) float64 3.589 3.601 3.613 3.625 ... 15.67 15.69 15.7
  * time           (time) datetime64[ns] 2022-01-01T00:50:00 ... 2022-01-31T2...
Dimensions without coordinates: y, x
Data variables:
    precipitation  (time, y, x) float64 nan nan nan nan nan ... nan nan nan nan

Thanks a lot for your support!

Here is my env: boto3==1.21.10 aiobotocore==2.3.0 botocore==1.24.21 s3fs==2022.5.0 zarr==2.11.3 xarray==2022.3.0

martindurant commented 2 years ago

Can you please check: does s3_out have a ._loop immediately after creation? This should happen in __init__, so I don't see how there could not be. In which case, you must be creating new instances somewhere, and probably the same instance multiple times from multiple threads. I haven't heard of this problem elsewhere, so it must be a pretty rare race condition (although I don't even see dask featured in the stack trace).
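
For example, something like this right after construction should print True and an event loop object (a rough check, assuming your AWS credentials are set up as usual):

import s3fs

fs = s3fs.S3FileSystem(anon=False)
# the async base class is expected to set the private loop attribute during __init__
print(hasattr(fs, "_loop"), getattr(fs, "_loop", None))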

meteoDaniel commented 2 years ago

Dear @martindurant, thanks for your reply.

Here is my result:


s3_out = s3fs.S3FileSystem(
    anon=False, s3_additional_kwargs={"StorageClass": storage_class}
)

In [4]: s3_out._loop
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
/app/src/radar_data/german_radar_data_download.py in <module>
----> 1 s3_out._loop

AttributeError: 'S3FileSystem' object has no attribute '_loop'

In [5]: s3_out.loop
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
/app/src/radar_data/german_radar_data_download.py in <module>
----> 1 s3_out.loop

/usr/local/lib/python3.9/site-packages/fsspec/asyn.py in loop(self)
    299         if self._pid != os.getpid():
    300             raise RuntimeError("This class is not fork-safe")
--> 301         return self._loop
    302 
    303     async def _rm_file(self, path, **kwargs):

AttributeError: 'S3FileSystem' object has no attribute '_loop'

In [6]: s3fs.__version__
Out[6]: '2022.5.0'

The S3FileSystem has no loop object. What I have understood so far is that this has to do with async features. Do I need nest_asyncio?

What do you mean by dask? Do I need a dask worker to use s3fs?

meteoDaniel commented 2 years ago

This is the expected output:

<_UnixSelectorEventLoop running=True closed=False debug=False>

Please note that the test I have mentioned above runs in another environment.

One of the differences I have spotted is that the env where the test runs correctly has no dask, while the failing env has dask 2022.6.1. There is also a slightly different aiohttp version (3.7.1 vs. 3.7.4). All other named packages are the same.

    from fsspec.asyn import get_loop
    loop = get_loop()
    s3_out = s3fs.S3FileSystem(
        anon=False, s3_additional_kwargs={"StorageClass": storage_class}, loop=loop
    )

This is not working either. This is really strange behaviour, because within the other environment everything is fine.
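
One more thing I can still check is which fsspec module the failing environment actually imports and whether its AsyncFileSystem.__init__ still assigns the loop, roughly like this:

import inspect

import fsspec
from fsspec.asyn import AsyncFileSystem

print(fsspec.__version__, fsspec.__file__)           # which installation is imported?
print(inspect.getsource(AsyncFileSystem.__init__))   # does it assign self._loop?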

Here is the output of pip3 list for the env that is not working:

aenum                 3.1.11
affine                2.3.1
aiobotocore           2.3.0
aiocache              0.11.1
aiofiles              0.8.0
aiohttp               3.7.4
aioitertools          0.10.0
alitiq-core           0.2.4
alitiq-db             0.0.38
appdirs               1.4.4
asciitree             0.3.3
async-timeout         3.0.1
attrs                 21.4.0
awslambdaric          1.0.0
backcall              0.2.0
beautifulsoup4        4.9.0
boto3                 1.21.10
botocore              1.24.21
cachetools            5.2.0
Cartopy               0.18.0
certifi               2022.6.15
cftime                1.6.0
chardet               3.0.4
click                 8.1.3
click-params          0.1.0
click-plugins         1.1.1
cligj                 0.7.2
cloudpickle           2.1.0
cloup                 0.8.0
cycler                0.11.0
Cython                0.29.21
dask                  2022.6.1
dateparser            1.1.1
decorator             5.1.1
deprecation           2.1.0
descartes             1.1.0
diskcache             5.4.0
distro                1.6.0
environs              9.5.0
fasteners             0.17.3
ffmpeg                1.4
Fiona                 1.8.21
fire                  0.3.1
fsspec                2022.5.0
GDAL                  3.0.4
geopandas             0.8.2
h11                   0.13.0
h2                    4.1.0
h3                    3.7.4
h5netcdf              0.10.0
h5py                  3.1.0
hpack                 4.0.0
Hypercorn             0.11.1
hyperframe            6.0.1
idna                  2.10
imutils               0.5.4
iniconfig             1.1.1
ipython               7.10.1
ipython-genutils      0.2.0
jarowinkler           1.0.4
jedi                  0.18.1
jmespath              0.10.0
joblib                1.1.0
kiwisolver            1.4.3
locket                1.0.0
lxml                  4.9.0
marshmallow           3.17.0
matplotlib            3.3.4
measurement           3.2.0
mock                  4.0.3
morecantile           3.1.2
mpmath                1.2.1
msgpack               1.0.2
multidict             4.7.6
munch                 2.5.0
nest-asyncio          1.5.5
netCDF4               1.5.6
numcodecs             0.10.0
numpy                 1.22.0
opencv-contrib-python 4.5.1.48
packaging             21.0
pandas                1.4.2
parso                 0.8.3
partd                 1.2.0
pexpect               4.8.0
pickleshare           0.7.5
Pillow                9.1.1
Pint                  0.19.2
pip                   21.2.4
pluggy                0.13.1
priority              2.0.0
prompt-toolkit        3.0.15
psycopg2-binary       2.8.6
ptyprocess            0.7.0
py                    1.8.2
pydantic              1.9.1
pygeos                0.10
Pygments              2.12.0
pykml                 0.2.0
pyparsing             2.0.3
PyPDF2                2.0.0
pyproj                3.3.1
pyshp                 2.3.0
pytest                7.1.2
python-dateutil       2.8.2
python-dotenv         0.20.0
pytz                  2020.1
pytz-deprecation-shim 0.1.0.post0
PyYAML                6.0
rapidfuzz             2.0.15
rasterio              1.2.10
regex                 2022.3.2
requests              2.25.1
rio-cogeo             3.2.0
s3fs                  2022.5.0
s3transfer            0.5.2
scikit-build          0.11.1
scikit-learn          1.1.1
scipy                 1.8.0
setuptools            57.5.0
Shapely               1.7.1
simplejson            3.17.2
six                   1.16.0
snuggs                1.4.4
soupsieve             2.3.2.post1
SQLAlchemy            1.3.13
sympy                 1.10.1
tabulate              0.8.9
termcolor             1.1.0
threadpoolctl         3.1.0
timezonefinder        6.0.1
toml                  0.10.2
tomli                 2.0.1
toolz                 0.11.2
tqdm                  4.64.0
trackpy               0.4.2
traitlets             5.3.0
typing_extensions     4.2.0
tzdata                2022.1
tzlocal               4.2
ujson                 4.0.2
urllib3               1.26.9
uvicorn               0.18.0
uvloop                0.16.0
validators            0.20.0
wcwidth               0.2.5
Werkzeug              2.0.0
wetterdienst          0.38.0
wheel                 0.37.0
xarray                2022.3.0
yarl                  1.6.0
zarr                  2.11.3

And here it is for the env that is working:

aiobotocore            2.3.0
aiofiles               0.5.0
aiohttp                3.7.1
aioitertools           0.10.0
alitiq-core            0.2.4
alitiq-db              0.0.38
asciitree              0.3.3
async-timeout          3.0.1
attrs                  21.4.0
backcall               0.2.0
boto3                  1.21.10
botocore               1.24.21
cachetools             3.1.1
certifi                2022.6.15
chardet                3.0.4
connectorx             0.2.3
decorator              5.1.1
fasteners              0.17.3
fsspec                 2022.5.0
GeoAlchemy2            0.10.2
greenlet               1.1.2
idna                   2.10
iniconfig              1.1.1
ipython                7.10.1
ipython-genutils       0.2.0
jedi                   0.18.1
jmespath               0.10.0
lxml                   4.9.0
mariadb                1.0.9
mock                   0.7.2
multidict              6.0.2
mysql-connector-python 8.0.28
nest-asyncio           1.4.1
numcodecs              0.10.0
numpy                  1.21.5
packaging              21.3
pandas                 1.4.0
parso                  0.8.3
pexpect                4.8.0
pickleshare            0.7.5
pip                    21.2.4
pluggy                 0.13.1
prompt-toolkit         3.0.29
protobuf               4.21.2
psycopg2-binary        2.9.3
ptyprocess             0.7.0
py                     1.11.0
Pygments               2.12.0
pykml                  0.2.0
pyparsing              3.0.9
pyproj                 3.3.1
pytest                 6.1.1
python-dateutil        2.8.2
pytz                   2022.1
PyYAML                 6.0
requests               2.23.0
s3fs                   2022.5.0
s3transfer             0.5.2
scipy                  1.7.3
setuptools             53.0.0
six                    1.16.0
SQLAlchemy             1.4.31
toml                   0.10.2
toolz                  0.10.0
traitlets              5.3.0
typing_extensions      4.2.0
urllib3                1.25.11
uvloop                 0.16.0
wcwidth                0.2.5
wheel                  0.36.2
xarray                 2022.3.0
yarl                   1.7.2
zarr                   2.11.3

Both envs are using Python 3.9.10.

meteoDaniel commented 2 years ago

Dear @martindurant,

the loop object disappears after this line


super().__init__(loop=loop, asynchronous=asynchronous, **super_kwargs)

of S3FileSystem. So I took some time to copy S3FileSystem and fsspec.asyn from https://filesystem-spec.readthedocs.io/en/latest/_modules/fsspec/asyn.html into my repo, and now it works.
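
To see which classes sit between S3FileSystem and the fsspec base class (and so which __init__ implementations run around the point where the loop is set), a quick look at the MRO might help; just a sketch:

import s3fs

# list every class in the resolution order and whether it defines its own __init__
for cls in s3fs.S3FileSystem.__mro__:
    print(f"{cls.__module__}.{cls.__name__}", "__init__" in vars(cls))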

I have checked the installed version of fsspec.asyn. I guess there is something wrong with the versioning:


In [1]: import fsspec

In [2]: fsspec.__version__
Out[2]: '2022.5.0'

In [3]: from fsspec import asyn

In [4]: asyn
Out[4]: <module 'fsspec.asyn' from '/usr/local/lib/python3.9/site-packages/fsspec/asyn.py'>

In [5]: cat /usr/local/lib/python3.9/site-packages/fsspec/asyn.py
import asyncio
import asyncio.events
import functools
import inspect
import io
import os
import re
import sys
import threading
from contextlib import contextmanager
from glob import has_magic

from .callbacks import _DEFAULT_CALLBACK
from .exceptions import FSTimeoutError
from .spec import AbstractBufferedFile, AbstractFileSystem
from .utils import is_exception, other_paths

private = re.compile("_[^_]")

async def _runner(event, coro, result, timeout=None):
    timeout = timeout if timeout else None  # convert 0 or 0.0 to None
    if timeout is not None:
        coro = asyncio.wait_for(coro, timeout=timeout)
    try:
        result[0] = await coro
    except Exception as ex:
        result[0] = ex
    finally:
        event.set()

def sync(loop, func, *args, timeout=None, **kwargs):
    """
    Make loop run coroutine until it returns. Runs in other thread
    """
    timeout = timeout if timeout else None  # convert 0 or 0.0 to None
    # NB: if the loop is not running *yet*, it is OK to submit work
    # and we will wait for it
    if loop is None or loop.is_closed():
        raise RuntimeError("Loop is not running")
    try:
        loop0 = asyncio.events.get_running_loop()
        if loop0 is loop:
            raise NotImplementedError("Calling sync() from within a running loop")
    except RuntimeError:
        pass
    coro = func(*args, **kwargs)
    result = [None]
    event = threading.Event()
    asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
    while True:
        # this loops allows thread to get interrupted
        if event.wait(1):
            break
        if timeout is not None:
            timeout -= 1
            if timeout < 0:
                raise FSTimeoutError

    return_result = result[0]
    if isinstance(return_result, asyncio.TimeoutError):
        # suppress asyncio.TimeoutError, raise FSTimeoutError
        raise FSTimeoutError from return_result
    elif isinstance(return_result, BaseException):
        raise return_result
    else:
        return return_result

iothread = [None]  # dedicated fsspec IO thread
loop = [None]  # global event loop for any non-async instance
lock = threading.Lock()  # for setting exactly one thread

def sync_wrapper(func, obj=None):
    """Given a function, make so can be called in async or blocking contexts

    Leave obj=None if defining within a class. Pass the instance if attaching
    as an attribute of the instance.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        self = obj or args[0]
        return sync(self.loop, func, *args, **kwargs)

    return wrapper

@contextmanager
def _selector_policy():
    original_policy = asyncio.get_event_loop_policy()
    try:
        if (
            sys.version_info >= (3, 8)
            and os.name == "nt"
            and hasattr(asyncio, "WindowsSelectorEventLoopPolicy")
        ):
            asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

        yield
    finally:
        asyncio.set_event_loop_policy(original_policy)

def get_running_loop():
    if hasattr(asyncio, "get_running_loop"):
        return asyncio.get_running_loop()
    else:
        loop = asyncio._get_running_loop()
        if loop is None:
            raise RuntimeError("no running event loop")
        else:
            return loop

def get_loop():
    """Create or return the default fsspec IO loop

    The loop will be running on a separate thread.
    """
    if loop[0] is None:
        with lock:
            # repeat the check just in case the loop got filled between the
            # previous two calls from another thread
            if loop[0] is None:
                with _selector_policy():
                    loop[0] = asyncio.new_event_loop()
                th = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
                th.daemon = True
                th.start()
                iothread[0] = th
    return loop[0]

@contextmanager
def fsspec_loop():
    """Temporarily switch the current event loop to the fsspec's
    own loop, and then revert it back after the context gets
    terminated.
    """
    try:
        original_loop = get_running_loop()
    except RuntimeError:
        original_loop = None

    fsspec_loop = get_loop()
    try:
        asyncio._set_running_loop(fsspec_loop)
        yield fsspec_loop
    finally:
        asyncio._set_running_loop(original_loop)

try:
    import resource
except ImportError:
    resource = None
    ResourceError = OSError
else:
    ResourceEror = resource.error

_DEFAULT_BATCH_SIZE = 128
_NOFILES_DEFAULT_BATCH_SIZE = 1280

def _get_batch_size(nofiles=False):
    from fsspec.config import conf

    if nofiles:
        if "nofiles_gather_batch_size" in conf:
            return conf["nofiles_gather_batch_size"]
    else:
        if "gather_batch_size" in conf:
            return conf["gather_batch_size"]
    if nofiles:
        return _NOFILES_DEFAULT_BATCH_SIZE
    if resource is None:
        return _DEFAULT_BATCH_SIZE

    try:
        soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
    except (ImportError, ValueError, ResourceError):
        return _DEFAULT_BATCH_SIZE

    if soft_limit == resource.RLIM_INFINITY:
        return -1
    else:
        return soft_limit // 8

async def _run_coros_in_chunks(
    coros,
    batch_size=None,
    callback=_DEFAULT_CALLBACK,
    timeout=None,
    return_exceptions=False,
    nofiles=False,
):
    """Run the given coroutines in  chunks.

    Parameters
    ----------
    coros: list of coroutines to run
    batch_size: int or None
        Number of coroutines to submit/wait on simultaneously.
        If -1, then it will not be any throttling. If
        None, it will be inferred from _get_batch_size()
    callback: fsspec.callbacks.Callback instance
        Gets a relative_update when each coroutine completes
    timeout: number or None
        If given, each coroutine times out after this time. Note that, since
        there are multiple batches, the total run time of this function will in
        general be longer
    return_exceptions: bool
        Same meaning as in asyncio.gather
    nofiles: bool
        If inferring the batch_size, does this operation involve local files?
        If yes, you normally expect smaller batches.
    """

    if batch_size is None:
        batch_size = _get_batch_size(nofiles=nofiles)

    if batch_size == -1:
        batch_size = len(coros)

    assert batch_size > 0
    results = []
    for start in range(0, len(coros), batch_size):
        chunk = [
            asyncio.Task(asyncio.wait_for(c, timeout=timeout))
            for c in coros[start : start + batch_size]
        ]
        if callback is not _DEFAULT_CALLBACK:
            [
                t.add_done_callback(lambda *_, **__: callback.relative_update(1))
                for t in chunk
            ]
        results.extend(
            await asyncio.gather(*chunk, return_exceptions=return_exceptions),
        )
    return results

# these methods should be implemented as async by any async-able backend
async_methods = [
    "_ls",
    "_cat_file",
    "_get_file",
    "_put_file",
    "_rm_file",
    "_cp_file",
    "_pipe_file",
    "_expand_path",
    "_info",
    "_isfile",
    "_isdir",
    "_exists",
    "_walk",
    "_glob",
    "_find",
    "_du",
    "_size",
    "_mkdir",
    "_makedirs",
]

class AsyncFileSystem(AbstractFileSystem):
    """Async file operations, default implementations

    Passes bulk operations to asyncio.gather for concurrent operation.

    Implementations that have concurrent batch operations and/or async methods
    should inherit from this class instead of AbstractFileSystem. Docstrings are
    copied from the un-underscored method in AbstractFileSystem, if not given.
    """

    # note that methods do not have docstring here; they will be copied
    # for _* methods and inferred for overridden methods.

    async_impl = True
    disable_throttling = False

    def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs):
        self.asynchronous = asynchronous
        self._pid = os.getpid()
        if not asynchronous:
            self._loop = loop or get_loop()
        else:
            self._loop = None
        self.batch_size = batch_size
        super().__init__(*args, **kwargs)

    @property
    def loop(self):
        if self._pid != os.getpid():
            raise RuntimeError("This class is not fork-safe")
        return self._loop

    async def _rm_file(self, path, **kwargs):
        raise NotImplementedError

    async def _rm(self, path, recursive=False, batch_size=None, **kwargs):
        # TODO: implement on_error
        batch_size = batch_size or self.batch_size
        path = await self._expand_path(path, recursive=recursive)
        return await _run_coros_in_chunks(
            [self._rm_file(p, **kwargs) for p in path],
            batch_size=batch_size,
            nofiles=True,
        )

    async def _cp_file(self, path1, path2, **kwargs):
        raise NotImplementedError

    async def _copy(
        self,
        path1,
        path2,
        recursive=False,
        on_error=None,
        maxdepth=None,
        batch_size=None,
        **kwargs,
    ):
        if on_error is None and recursive:
            on_error = "ignore"
        elif on_error is None:
            on_error = "raise"

        paths = await self._expand_path(path1, maxdepth=maxdepth, recursive=recursive)
        path2 = other_paths(paths, path2)
        batch_size = batch_size or self.batch_size
        coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths, path2)]
        result = await _run_coros_in_chunks(
            coros, batch_size=batch_size, return_exceptions=True, nofiles=True
        )

        for ex in filter(is_exception, result):
            if on_error == "ignore" and isinstance(ex, FileNotFoundError):
                continue
            raise ex

    async def _pipe(self, path, value=None, batch_size=None, **kwargs):
        if isinstance(path, str):
            path = {path: value}
        batch_size = batch_size or self.batch_size
        return await _run_coros_in_chunks(
            [self._pipe_file(k, v, **kwargs) for k, v in path.items()],
            batch_size=batch_size,
            nofiles=True,
        )

    async def _process_limits(self, url, start, end):
        """Helper for "Range"-based _cat_file"""
        size = None
        suff = False
        if start is not None and start < 0:
            # if start is negative and end None, end is the "suffix length"
            if end is None:
                end = -start
                start = ""
                suff = True
            else:
                size = size or (await self._info(url))["size"]
                start = size + start
        elif start is None:
            start = 0
        if not suff:
            if end is not None and end < 0:
                if start is not None:
                    size = size or (await self._info(url))["size"]
                    end = size + end
            elif end is None:
                end = ""
            if isinstance(end, int):
                end -= 1  # bytes range is inclusive
        return "bytes=%s-%s" % (start, end)

    async def _cat_file(self, path, start=None, end=None, **kwargs):
        raise NotImplementedError

    async def _cat(
        self, path, recursive=False, on_error="raise", batch_size=None, **kwargs
    ):
        paths = await self._expand_path(path, recursive=recursive)
        coros = [self._cat_file(path, **kwargs) for path in paths]
        batch_size = batch_size or self.batch_size
        out = await _run_coros_in_chunks(
            coros, batch_size=batch_size, nofiles=True, return_exceptions=True
        )
        if on_error == "raise":
            ex = next(filter(is_exception, out), False)
            if ex:
                raise ex
        if (
            len(paths) > 1
            or isinstance(path, list)
            or paths[0] != self._strip_protocol(path)
        ):
            return {
                k: v
                for k, v in zip(paths, out)
                if on_error != "omit" or not is_exception(v)
            }
        else:
            return out[0]

    async def _cat_ranges(
        self, paths, starts, ends, max_gap=None, batch_size=None, **kwargs
    ):
        # TODO: on_error
        if max_gap is not None:
            # use utils.merge_offset_ranges
            raise NotImplementedError
        if not isinstance(paths, list):
            raise TypeError
        if not isinstance(starts, list):
            starts = [starts] * len(paths)
        if not isinstance(ends, list):
            ends = [starts] * len(paths)
        if len(starts) != len(paths) or len(ends) != len(paths):
            raise ValueError
        coros = [
            self._cat_file(p, start=s, end=e, **kwargs)
            for p, s, e in zip(paths, starts, ends)
        ]
        batch_size = batch_size or self.batch_size
        return await _run_coros_in_chunks(coros, batch_size=batch_size, nofiles=True)

    async def _put_file(self, lpath, rpath, **kwargs):
        raise NotImplementedError

    async def _put(
        self,
        lpath,
        rpath,
        recursive=False,
        callback=_DEFAULT_CALLBACK,
        batch_size=None,
        **kwargs,
    ):
        """Copy file(s) from local.

        Copies a specific file or tree of files (if recursive=True). If rpath
        ends with a "/", it will be assumed to be a directory, and target files
        will go within.

        The put_file method will be called concurrently on a batch of files. The
        batch_size option can configure the amount of futures that can be executed
        at the same time. If it is -1, then all the files will be uploaded concurrently.
        The default can be set for this instance by passing "batch_size" in the
        constructor, or for all instances by setting the "gather_batch_size" key
        in ``fsspec.config.conf``, falling back to 1/8th of the system limit .
        """
        from .implementations.local import LocalFileSystem, make_path_posix

        rpath = self._strip_protocol(rpath)
        if isinstance(lpath, str):
            lpath = make_path_posix(lpath)
        fs = LocalFileSystem()
        lpaths = fs.expand_path(lpath, recursive=recursive)
        rpaths = other_paths(
            lpaths, rpath, exists=isinstance(rpath, str) and await self._isdir(rpath)
        )

        is_dir = {l: os.path.isdir(l) for l in lpaths}
        rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
        file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]

        await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
        batch_size = batch_size or self.batch_size

        coros = []
        callback.set_size(len(file_pairs))
        for lfile, rfile in file_pairs:
            callback.branch(lfile, rfile, kwargs)
            coros.append(self._put_file(lfile, rfile, **kwargs))

        return await _run_coros_in_chunks(
            coros, batch_size=batch_size, callback=callback
        )

    async def _get_file(self, rpath, lpath, **kwargs):
        raise NotImplementedError

    async def _get(
        self, rpath, lpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs
    ):
        """Copy file(s) to local.

        Copies a specific file or tree of files (if recursive=True). If lpath
        ends with a "/", it will be assumed to be a directory, and target files
        will go within. Can submit a list of paths, which may be glob-patterns
        and will be expanded.

        The get_file method will be called concurrently on a batch of files. The
        batch_size option can configure the amount of futures that can be executed
        at the same time. If it is -1, then all the files will be uploaded concurrently.
        The default can be set for this instance by passing "batch_size" in the
        constructor, or for all instances by setting the "gather_batch_size" key
        in ``fsspec.config.conf``, falling back to 1/8th of the system limit .
        """
        from fsspec.implementations.local import make_path_posix

        rpath = self._strip_protocol(rpath)
        lpath = make_path_posix(lpath)
        rpaths = await self._expand_path(rpath, recursive=recursive)
        lpaths = other_paths(rpaths, lpath)
        [os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths]
        batch_size = kwargs.pop("batch_size", self.batch_size)

        coros = []
        callback.set_size(len(lpaths))
        for lpath, rpath in zip(lpaths, rpaths):
            callback.branch(rpath, lpath, kwargs)
            coros.append(self._get_file(rpath, lpath, **kwargs))
        return await _run_coros_in_chunks(
            coros, batch_size=batch_size, callback=callback
        )

    async def _isfile(self, path):
        try:
            return (await self._info(path))["type"] == "file"
        except:  # noqa: E722
            return False

    async def _isdir(self, path):
        try:
            return (await self._info(path))["type"] == "directory"
        except IOError:
            return False

    async def _size(self, path):
        return (await self._info(path)).get("size", None)

    async def _sizes(self, paths, batch_size=None):
        batch_size = batch_size or self.batch_size
        return await _run_coros_in_chunks(
            [self._size(p) for p in paths], batch_size=batch_size
        )

    async def _exists(self, path):
        try:
            await self._info(path)
            return True
        except FileNotFoundError:
            return False

    async def _info(self, path, **kwargs):
        raise NotImplementedError

    async def _ls(self, path, detail=True, **kwargs):
        raise NotImplementedError

    async def _walk(self, path, maxdepth=None, **kwargs):
        path = self._strip_protocol(path)
        full_dirs = {}
        dirs = {}
        files = {}

        detail = kwargs.pop("detail", False)
        try:
            listing = await self._ls(path, detail=True, **kwargs)
        except (FileNotFoundError, IOError):
            if detail:
                yield path, {}, {}
            else:
                yield path, [], []
            return

        for info in listing:
            # each info name must be at least [path]/part , but here
            # we check also for names like [path]/part/
            pathname = info["name"].rstrip("/")
            name = pathname.rsplit("/", 1)[-1]
            if info["type"] == "directory" and pathname != path:
                # do not include "self" path
                full_dirs[pathname] = info
                dirs[name] = info
            elif pathname == path:
                # file-like with same name as give path
                files[""] = info
            else:
                files[name] = info

        if detail:
            yield path, dirs, files
        else:
            yield path, list(dirs), list(files)

        if maxdepth is not None:
            maxdepth -= 1
            if maxdepth < 1:
                return

        for d in full_dirs:
            async for _ in self._walk(d, maxdepth=maxdepth, detail=detail, **kwargs):
                yield _

    async def _glob(self, path, **kwargs):
        import re

        ends = path.endswith("/")
        path = self._strip_protocol(path)
        indstar = path.find("*") if path.find("*") >= 0 else len(path)
        indques = path.find("?") if path.find("?") >= 0 else len(path)
        indbrace = path.find("[") if path.find("[") >= 0 else len(path)

        ind = min(indstar, indques, indbrace)

        detail = kwargs.pop("detail", False)

        if not has_magic(path):
            root = path
            depth = 1
            if ends:
                path += "/*"
            elif await self._exists(path):
                if not detail:
                    return [path]
                else:
                    return {path: await self._info(path)}
            else:
                if not detail:
                    return []  # glob of non-existent returns empty
                else:
                    return {}
        elif "/" in path[:ind]:
            ind2 = path[:ind].rindex("/")
            root = path[: ind2 + 1]
            depth = None if "**" in path else path[ind2 + 1 :].count("/") + 1
        else:
            root = ""
            depth = None if "**" in path else path[ind + 1 :].count("/") + 1

        allpaths = await self._find(
            root, maxdepth=depth, withdirs=True, detail=True, **kwargs
        )
        # Escape characters special to python regex, leaving our supported
        # special characters in place.
        # See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html
        # for shell globbing details.
        pattern = (
            "^"
            + (
                path.replace("\\", r"\\")
                .replace(".", r"\.")
                .replace("+", r"\+")
                .replace("//", "/")
                .replace("(", r"\(")
                .replace(")", r"\)")
                .replace("|", r"\|")
                .replace("^", r"\^")
                .replace("$", r"\$")
                .replace("{", r"\{")
                .replace("}", r"\}")
                .rstrip("/")
                .replace("?", ".")
            )
            + "$"
        )
        pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern)
        pattern = re.sub("[*]", "[^/]*", pattern)
        pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*"))
        out = {
            p: allpaths[p]
            for p in sorted(allpaths)
            if pattern.match(p.replace("//", "/").rstrip("/"))
        }
        if detail:
            return out
        else:
            return list(out)

    async def _du(self, path, total=True, maxdepth=None, **kwargs):
        sizes = {}
        # async for?
        for f in await self._find(path, maxdepth=maxdepth, **kwargs):
            info = await self._info(f)
            sizes[info["name"]] = info["size"]
        if total:
            return sum(sizes.values())
        else:
            return sizes

    async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
        path = self._strip_protocol(path)
        out = dict()
        detail = kwargs.pop("detail", False)
        # async for?
        async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
            if withdirs:
                files.update(dirs)
            out.update({info["name"]: info for name, info in files.items()})
        if not out and (await self._isfile(path)):
            # walk works on directories, but find should also return [path]
            # when path happens to be a file
            out[path] = {}
        names = sorted(out)
        if not detail:
            return names
        else:
            return {name: out[name] for name in names}

    async def _expand_path(self, path, recursive=False, maxdepth=None):
        if isinstance(path, str):
            out = await self._expand_path([path], recursive, maxdepth)
        else:
            # reduce depth on each recursion level unless None or 0
            maxdepth = maxdepth if not maxdepth else maxdepth - 1
            out = set()
            path = [self._strip_protocol(p) for p in path]
            for p in path:  # can gather here
                if has_magic(p):
                    bit = set(await self._glob(p))
                    out |= bit
                    if recursive:
                        out |= set(
                            await self._expand_path(
                                list(bit), recursive=recursive, maxdepth=maxdepth
                            )
                        )
                    continue
                elif recursive:
                    rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
                    out |= rec
                if p not in out and (recursive is False or (await self._exists(p))):
                    # should only check once, for the root
                    out.add(p)
        if not out:
            raise FileNotFoundError(path)
        return list(sorted(out))

    async def _mkdir(self, path, create_parents=True, **kwargs):
        pass  # not necessary to implement, may not have directories

    async def _makedirs(self, path, exist_ok=False):
        pass  # not necessary to implement, may not have directories

    async def open_async(self, path, mode="rb", **kwargs):
        if "b" not in mode or kwargs.get("compression"):
            raise ValueError
        raise NotImplementedError

def mirror_sync_methods(obj):
    """Populate sync and async methods for obj

    For each method will create a sync version if the name refers to an async method
    (coroutine) and there is no override in the child class; will create an async
    method for the corresponding sync method if there is no implementation.

    Uses the methods specified in
    - async_methods: the set that an implementation is expected to provide
    - default_async_methods: that can be derived from their sync version in
      AbstractFileSystem
    - AsyncFileSystem: async-specific default coroutines
    """
    from fsspec import AbstractFileSystem

    for method in async_methods + dir(AsyncFileSystem):
        if not method.startswith("_"):
            continue
        smethod = method[1:]
        if private.match(method):
            isco = inspect.iscoroutinefunction(getattr(obj, method, None))
            unsync = getattr(getattr(obj, smethod, False), "__func__", None)
            is_default = unsync is getattr(AbstractFileSystem, smethod, "")
            if isco and is_default:
                mth = sync_wrapper(getattr(obj, method), obj=obj)
                setattr(obj, smethod, mth)
                if not mth.__doc__:
                    mth.__doc__ = getattr(
                        getattr(AbstractFileSystem, smethod, None), "__doc__", ""
                    )

class FSSpecCoroutineCancel(Exception):
    pass

def _dump_running_tasks(
    printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
):
    import traceback

    tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
    if printout:
        [task.print_stack() for task in tasks]
    out = [
        {
            "locals": task._coro.cr_frame.f_locals,
            "file": task._coro.cr_frame.f_code.co_filename,
            "firstline": task._coro.cr_frame.f_code.co_firstlineno,
            "linelo": task._coro.cr_frame.f_lineno,
            "stack": traceback.format_stack(task._coro.cr_frame),
            "task": task if with_task else None,
        }
        for task in tasks
    ]
    if cancel:
        for t in tasks:
            cbs = t._callbacks
            t.cancel()
            asyncio.futures.Future.set_exception(t, exc)
            asyncio.futures.Future.cancel(t)
            [cb[0](t) for cb in cbs]  # cancels any dependent concurrent.futures
            try:
                t._coro.throw(exc)  # exits coro, unless explicitly handled
            except exc:
                pass
    return out

class AbstractAsyncStreamedFile(AbstractBufferedFile):
    # no read buffering, and always auto-commit
    # TODO: readahead might still be useful here, but needs async version

    async def read(self, length=-1):
        """
        Return data from cache, or fetch pieces as necessary

        Parameters
        ----------
        length: int (-1)
            Number of bytes to read; if <0, all remaining bytes.
        """
        length = -1 if length is None else int(length)
        if self.mode != "rb":
            raise ValueError("File not in read mode")
        if length < 0:
            length = self.size - self.loc
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        if length == 0:
            # don't even bother calling fetch
            return b""
        out = await self._fetch_range(self.loc, self.loc + length)
        self.loc += len(out)
        return out

    async def write(self, data):
        """
        Write data to buffer.

        Buffer only sent on flush() or if buffer is greater than
        or equal to blocksize.

        Parameters
        ----------
        data: bytes
            Set of bytes to be written.
        """
        if self.mode not in {"wb", "ab"}:
            raise ValueError("File not in write mode")
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        if self.forced:
            raise ValueError("This file has been force-flushed, can only close")
        out = self.buffer.write(data)
        self.loc += out
        if self.buffer.tell() >= self.blocksize:
            await self.flush()
        return out

    async def close(self):
        """Close file

        Finalizes writes, discards cache
        """
        if getattr(self, "_unclosable", False):
            return
        if self.closed:
            return
        if self.mode == "rb":
            self.cache = None
        else:
            if not self.forced:
                await self.flush(force=True)

            if self.fs is not None:
                self.fs.invalidate_cache(self.path)
                self.fs.invalidate_cache(self.fs._parent(self.path))

        self.closed = True

    async def flush(self, force=False):
        if self.closed:
            raise ValueError("Flush on closed file")
        if force and self.forced:
            raise ValueError("Force flush cannot be called more than once")
        if force:
            self.forced = True

        if self.mode not in {"wb", "ab"}:
            # no-op to flush on read-mode
            return

        if not force and self.buffer.tell() < self.blocksize:
            # Defer write on small block
            return

        if self.offset is None:
            # Initialize a multipart upload
            self.offset = 0
            try:
                await self._initiate_upload()
            except:  # noqa: E722
                self.closed = True
                raise

        if self._upload_chunk(final=force) is not False:
            self.offset += self.buffer.seek(0, 2)
            self.buffer = io.BytesIO()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()

    async def _fetch_range(self, start, end):
        raise NotImplementedError

    async def _initiate_upload(self):
        raise NotImplementedError

    async def _upload_chunk(self, final=False):
        raise NotImplementedError

In [6]:

This is not the same asyn as the one I found in the docs, and the version from the docs is working.

meteoDaniel commented 2 years ago

And a minor issue I have detected: in s3fs.py, FSTimeoutError should not be imported from fsspec.asyn; it should be imported directly from fsspec.exceptions:

from fsspec.exceptions import FSTimeoutError

martindurant commented 2 years ago

with fsspec.open("zip://fsspec/asyn.py::https://files.pythonhosted.org/packages/bd/4c/166d788feff5c739b833342945bbba406581095fb6c4a056113fae646b5c/fsspec-2022.5.0-py3-none-any.whl", "rt") as f:
    print(f.read())

shows you the latest released version of asyn.py, and

with fsspec.open("github://fsspec:filesystem_spec@/fsspec/asyn.py", "rt") as f:
    print(f.read())

for the latest repository main branch version. I believe you will find that the version of the code is the same as on readthedocs. Might I suggest you try in a new environment?
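
If you want to compare your installed copy against the released file directly, a quick diff along these lines should settle it (a rough sketch, reusing the wheel URL above):

import difflib

import fsspec
from fsspec import asyn

url = (
    "zip://fsspec/asyn.py::https://files.pythonhosted.org/packages/"
    "bd/4c/166d788feff5c739b833342945bbba406581095fb6c4a056113fae646b5c/"
    "fsspec-2022.5.0-py3-none-any.whl"
)

# read the released asyn.py straight out of the wheel on PyPI
with fsspec.open(url, "rt") as f:
    released = f.read().splitlines()

# and the copy that is actually installed in this environment
with open(asyn.__file__) as f:
    installed = f.read().splitlines()

print("\n".join(difflib.unified_diff(installed, released, "installed", "released", lineterm="")))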