diff --git a/asv_bench/benchmarks/dataset_io.py b/asv_bench/benchmarks/dataset_io.py
index da18d541a16..3e070e1355b 100644
--- a/asv_bench/benchmarks/dataset_io.py
+++ b/asv_bench/benchmarks/dataset_io.py
@@ -168,7 +168,7 @@ def time_load_dataset_netcdf4_with_block_chunks_vindexing(self):
         ds = ds.isel(**self.vinds).load()
 
     def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):
-        with dask.set_options(get=dask.multiprocessing.get):
+        with dask.config.set(scheduler="multiprocessing"):
             xr.open_dataset(self.filepath, engine='netcdf4',
                             chunks=self.block_chunks).load()
 
@@ -177,7 +177,7 @@ def time_load_dataset_netcdf4_with_time_chunks(self):
                         chunks=self.time_chunks).load()
 
     def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):
-        with dask.set_options(get=dask.multiprocessing.get):
+        with dask.config.set(scheduler="multiprocessing"):
             xr.open_dataset(self.filepath, engine='netcdf4',
                             chunks=self.time_chunks).load()
 
@@ -194,7 +194,7 @@ def setup(self):
         self.ds.to_netcdf(self.filepath, format=self.format)
 
     def time_load_dataset_scipy_with_block_chunks(self):
-        with dask.set_options(get=dask.multiprocessing.get):
+        with dask.config.set(scheduler="multiprocessing"):
             xr.open_dataset(self.filepath, engine='scipy',
                             chunks=self.block_chunks).load()
 
@@ -209,7 +209,7 @@ def time_load_dataset_scipy_with_block_chunks_vindexing(self):
         ds = ds.isel(**self.vinds).load()
 
     def time_load_dataset_scipy_with_time_chunks(self):
-        with dask.set_options(get=dask.multiprocessing.get):
+        with dask.config.set(scheduler="multiprocessing"):
             xr.open_dataset(self.filepath, engine='scipy',
                             chunks=self.time_chunks).load()
 
@@ -349,7 +349,7 @@ def time_load_dataset_netcdf4_with_block_chunks(self):
                           chunks=self.block_chunks).load()
 
     def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):
-        with dask.set_options(get=dask.multiprocessing.get):
+        with dask.config.set(scheduler="multiprocessing"):
             xr.open_mfdataset(self.filenames_list, engine='netcdf4',
                               chunks=self.block_chunks).load()
 
@@ -358,7 +358,7 @@ def time_load_dataset_netcdf4_with_time_chunks(self):
                           chunks=self.time_chunks).load()
 
     def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):
-        with dask.set_options(get=dask.multiprocessing.get):
+        with dask.config.set(scheduler="multiprocessing"):
             xr.open_mfdataset(self.filenames_list, engine='netcdf4',
                               chunks=self.time_chunks).load()
 
@@ -367,7 +367,7 @@ def time_open_dataset_netcdf4_with_block_chunks(self):
                           chunks=self.block_chunks)
 
     def time_open_dataset_netcdf4_with_block_chunks_multiprocessing(self):
-        with dask.set_options(get=dask.multiprocessing.get):
+        with dask.config.set(scheduler="multiprocessing"):
             xr.open_mfdataset(self.filenames_list, engine='netcdf4',
                               chunks=self.block_chunks)
 
@@ -376,7 +376,7 @@ def time_open_dataset_netcdf4_with_time_chunks(self):
                           chunks=self.time_chunks)
 
     def time_open_dataset_netcdf4_with_time_chunks_multiprocessing(self):
-        with dask.set_options(get=dask.multiprocessing.get):
+        with dask.config.set(scheduler="multiprocessing"):
             xr.open_mfdataset(self.filenames_list, engine='netcdf4',
                               chunks=self.time_chunks)
 
@@ -392,22 +392,22 @@ def setup(self):
                           format=self.format)
 
     def time_load_dataset_scipy_with_block_chunks(self):
-        with dask.set_options(get=dask.multiprocessing.get):
+        with dask.config.set(scheduler="multiprocessing"):
             xr.open_mfdataset(self.filenames_list, engine='scipy',
                               chunks=self.block_chunks).load()
 
     def time_load_dataset_scipy_with_time_chunks(self):
-        with dask.set_options(get=dask.multiprocessing.get):
+        with dask.config.set(scheduler="multiprocessing"):
             xr.open_mfdataset(self.filenames_list, engine='scipy',
                               chunks=self.time_chunks).load()
 
     def time_open_dataset_scipy_with_block_chunks(self):
-        with dask.set_options(get=dask.multiprocessing.get):
+        with dask.config.set(scheduler="multiprocessing"):
             xr.open_mfdataset(self.filenames_list, engine='scipy',
                               chunks=self.block_chunks)
 
     def time_open_dataset_scipy_with_time_chunks(self):
-        with dask.set_options(get=dask.multiprocessing.get):
+        with dask.config.set(scheduler="multiprocessing"):
             xr.open_mfdataset(self.filenames_list, engine='scipy',
                               chunks=self.time_chunks)
 
diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index 61da801badb..9a55b9380b9 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -106,7 +106,9 @@ Bug fixes
   (:issue:`2484`). By `Spencer Clark `_.
 - Adding a TimedeltaIndex to, or subtracting a TimedeltaIndex from a CFTimeIndex
   is now allowed (:issue:`2484`).
-  By `Spencer Clark `_.
+  By `Spencer Clark `_.
+- Avoid use of Dask's deprecated ``get=`` parameter in tests
+  by `Matthew Rocklin `_.
 
 .. _whats-new.0.10.9:
 
diff --git a/xarray/tests/__init__.py b/xarray/tests/__init__.py
index 56ecfa30c4d..a45f71bbc3b 100644
--- a/xarray/tests/__init__.py
+++ b/xarray/tests/__init__.py
@@ -93,7 +93,7 @@ def LooseVersion(vstring):
     if LooseVersion(dask.__version__) < '0.18':
         dask.set_options(get=dask.get)
     else:
-        dask.config.set(scheduler='sync')
+        dask.config.set(scheduler='single-threaded')
 try:
     import_seaborn()
     has_seaborn = True
diff --git a/xarray/tests/test_dask.py b/xarray/tests/test_dask.py
index e56f751bef9..62ce7d074fa 100644
--- a/xarray/tests/test_dask.py
+++ b/xarray/tests/test_dask.py
@@ -26,7 +26,7 @@ class DaskTestCase(object):
 
     def assertLazyAnd(self, expected, actual, test):
-        with (dask.config.set(get=dask.get)
+        with (dask.config.set(scheduler='single-threaded')
               if LooseVersion(dask.__version__) >= LooseVersion('0.18.0')
               else dask.set_options(get=dask.get)):
             test(actual, expected)
 
@@ -456,7 +456,11 @@ def counting_get(*args, **kwargs):
             count[0] += 1
             return dask.get(*args, **kwargs)
 
-        ds.load(get=counting_get)
+        if dask.__version__ < '0.19.4':
+            ds.load(get=counting_get)
+        else:
+            ds.load(scheduler=counting_get)
+
         assert count[0] == 1
 
     def test_stack(self):
@@ -831,7 +835,9 @@ def test_basic_compute():
                 dask.multiprocessing.get,
                 dask.local.get_sync,
                 None]:
-        with (dask.config.set(get=get)
+        with (dask.config.set(scheduler=get)
+              if LooseVersion(dask.__version__) >= LooseVersion('0.19.4')
+              else dask.config.set(scheduler=get)
               if LooseVersion(dask.__version__) >= LooseVersion('0.18.0')
               else dask.set_options(get=get)):
             ds.compute()
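
For context, below is a minimal sketch (not part of the patch) of the compatibility pattern the changes above apply: prefer ``dask.config.set(scheduler=...)`` on dask >= 0.18 and fall back to the deprecated ``dask.set_options(get=...)`` on older releases. The helper name ``multiprocessing_scheduler`` and the toy dataset are illustrative assumptions; the version threshold and the ``LooseVersion`` check mirror those used in the diff.

# Illustrative sketch only: version-compatible selection of a dask scheduler.
from distutils.version import LooseVersion  # mirrors the check used in the tests

import dask
import xarray as xr


def multiprocessing_scheduler():
    """Return a context manager that activates the multiprocessing scheduler."""
    if LooseVersion(dask.__version__) < LooseVersion('0.18.0'):
        # Old API, removed in later dask releases.
        import dask.multiprocessing
        return dask.set_options(get=dask.multiprocessing.get)
    # Current API: name the scheduler instead of passing a ``get`` function.
    return dask.config.set(scheduler='multiprocessing')


if __name__ == '__main__':
    # Hypothetical usage: load a small chunked dataset under the chosen scheduler.
    ds = xr.Dataset({'foo': ('x', list(range(10)))}).chunk({'x': 5})
    with multiprocessing_scheduler():
        ds.load()  # computed with the multiprocessing scheduler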