diff --git a/ci/requirements/doc.yml b/ci/requirements/doc.yml index ddede6ad383..d1a9c329d9f 100644 --- a/ci/requirements/doc.yml +++ b/ci/requirements/doc.yml @@ -24,7 +24,7 @@ dependencies: - rasterio>=1.1 - seaborn - setuptools - - sphinx=3.1 + - sphinx=3.2 - sphinx_rtd_theme>=0.4 - sphinx-autosummary-accessors - zarr>=2.4 diff --git a/doc/api-hidden.rst b/doc/api-hidden.rst index 6aca90860d2..b10271b13f5 100644 --- a/doc/api-hidden.rst +++ b/doc/api-hidden.rst @@ -52,6 +52,7 @@ core.rolling.DatasetCoarsen.var core.rolling.DatasetCoarsen.boundary core.rolling.DatasetCoarsen.coord_func + core.rolling.DatasetCoarsen.keep_attrs core.rolling.DatasetCoarsen.obj core.rolling.DatasetCoarsen.side core.rolling.DatasetCoarsen.trim_excess @@ -104,6 +105,8 @@ core.resample.DatasetResample.dims core.resample.DatasetResample.groups + core.rolling.DatasetRolling.argmax + core.rolling.DatasetRolling.argmin core.rolling.DatasetRolling.count core.rolling.DatasetRolling.max core.rolling.DatasetRolling.mean @@ -115,11 +118,15 @@ core.rolling.DatasetRolling.var core.rolling.DatasetRolling.center core.rolling.DatasetRolling.dim + core.rolling.DatasetRolling.keep_attrs core.rolling.DatasetRolling.min_periods core.rolling.DatasetRolling.obj core.rolling.DatasetRolling.rollings core.rolling.DatasetRolling.window + core.weighted.DatasetWeighted.obj + core.weighted.DatasetWeighted.weights + core.rolling_exp.RollingExp.mean Dataset.argsort @@ -188,6 +195,7 @@ core.rolling.DataArrayCoarsen.var core.rolling.DataArrayCoarsen.boundary core.rolling.DataArrayCoarsen.coord_func + core.rolling.DataArrayCoarsen.keep_attrs core.rolling.DataArrayCoarsen.obj core.rolling.DataArrayCoarsen.side core.rolling.DataArrayCoarsen.trim_excess @@ -238,6 +246,8 @@ core.resample.DataArrayResample.dims core.resample.DataArrayResample.groups + core.rolling.DataArrayRolling.argmax + core.rolling.DataArrayRolling.argmin core.rolling.DataArrayRolling.count core.rolling.DataArrayRolling.max core.rolling.DataArrayRolling.mean @@ -249,11 +259,15 @@ core.rolling.DataArrayRolling.var core.rolling.DataArrayRolling.center core.rolling.DataArrayRolling.dim + core.rolling.DataArrayRolling.keep_attrs core.rolling.DataArrayRolling.min_periods core.rolling.DataArrayRolling.obj core.rolling.DataArrayRolling.window core.rolling.DataArrayRolling.window_labels + core.weighted.DataArrayWeighted.obj + core.weighted.DataArrayWeighted.weights + DataArray.argsort DataArray.clip DataArray.conj @@ -277,6 +291,13 @@ core.accessor_dt.DatetimeAccessor.days_in_month core.accessor_dt.DatetimeAccessor.daysinmonth core.accessor_dt.DatetimeAccessor.hour + core.accessor_dt.DatetimeAccessor.is_leap_year + core.accessor_dt.DatetimeAccessor.is_month_end + core.accessor_dt.DatetimeAccessor.is_month_start + core.accessor_dt.DatetimeAccessor.is_quarter_end + core.accessor_dt.DatetimeAccessor.is_quarter_start + core.accessor_dt.DatetimeAccessor.is_year_end + core.accessor_dt.DatetimeAccessor.is_year_start core.accessor_dt.DatetimeAccessor.microsecond core.accessor_dt.DatetimeAccessor.minute core.accessor_dt.DatetimeAccessor.month @@ -291,6 +312,14 @@ core.accessor_dt.DatetimeAccessor.weekofyear core.accessor_dt.DatetimeAccessor.year + core.accessor_dt.TimedeltaAccessor.ceil + core.accessor_dt.TimedeltaAccessor.floor + core.accessor_dt.TimedeltaAccessor.round + core.accessor_dt.TimedeltaAccessor.days + core.accessor_dt.TimedeltaAccessor.microseconds + core.accessor_dt.TimedeltaAccessor.nanoseconds + core.accessor_dt.TimedeltaAccessor.seconds + 
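The datetime-component properties added to the listing above (``is_leap_year``, ``is_month_start``, the ``TimedeltaAccessor`` fields, and so on) mirror their pandas counterparts and surface through the ``.dt`` accessor. A minimal sketch, with illustrative data rather than anything from this patch::

    import pandas as pd
    import xarray as xr

    times = xr.DataArray(
        pd.date_range("2000-01-01", periods=4, freq="11D"), dims="time"
    )
    print(times.dt.is_leap_year.values)    # [ True  True  True  True]
    print(times.dt.is_month_start.values)  # [ True False False False]

    deltas = xr.DataArray(pd.timedelta_range("1 day", periods=3, freq="12H"), dims="t")
    print(deltas.dt.days.values)     # [1 1 2]
    print(deltas.dt.seconds.values)  # [    0 43200     0]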
core.accessor_str.StringAccessor.capitalize core.accessor_str.StringAccessor.center core.accessor_str.StringAccessor.contains @@ -365,6 +394,7 @@ Variable.min Variable.no_conflicts Variable.notnull + Variable.pad Variable.prod Variable.quantile Variable.rank @@ -407,6 +437,8 @@ IndexVariable.all IndexVariable.any + IndexVariable.argmax + IndexVariable.argmin IndexVariable.argsort IndexVariable.astype IndexVariable.broadcast_equals @@ -436,6 +468,7 @@ IndexVariable.min IndexVariable.no_conflicts IndexVariable.notnull + IndexVariable.pad IndexVariable.prod IndexVariable.quantile IndexVariable.rank @@ -538,6 +571,16 @@ ufuncs.tanh ufuncs.trunc + plot.plot + plot.line + plot.step + plot.hist + plot.contour + plot.contourf + plot.imshow + plot.pcolormesh + plot.scatter + plot.FacetGrid.map_dataarray plot.FacetGrid.set_titles plot.FacetGrid.set_ticks @@ -547,11 +590,16 @@ CFTimeIndex.any CFTimeIndex.append CFTimeIndex.argsort + CFTimeIndex.argmax + CFTimeIndex.argmin CFTimeIndex.asof CFTimeIndex.asof_locs CFTimeIndex.astype + CFTimeIndex.calendar + CFTimeIndex.ceil CFTimeIndex.contains CFTimeIndex.copy + CFTimeIndex.days_in_month CFTimeIndex.delete CFTimeIndex.difference CFTimeIndex.drop @@ -562,6 +610,7 @@ CFTimeIndex.equals CFTimeIndex.factorize CFTimeIndex.fillna + CFTimeIndex.floor CFTimeIndex.format CFTimeIndex.get_indexer CFTimeIndex.get_indexer_for @@ -602,6 +651,7 @@ CFTimeIndex.reindex CFTimeIndex.rename CFTimeIndex.repeat + CFTimeIndex.round CFTimeIndex.searchsorted CFTimeIndex.set_names CFTimeIndex.set_value @@ -688,15 +738,20 @@ backends.NetCDF4DataStore.is_remote backends.NetCDF4DataStore.lock + backends.H5NetCDFStore.autoclose backends.H5NetCDFStore.close backends.H5NetCDFStore.encode backends.H5NetCDFStore.encode_attribute backends.H5NetCDFStore.encode_variable + backends.H5NetCDFStore.format backends.H5NetCDFStore.get_attrs backends.H5NetCDFStore.get_dimensions backends.H5NetCDFStore.get_encoding backends.H5NetCDFStore.get_variables + backends.H5NetCDFStore.is_remote backends.H5NetCDFStore.load + backends.H5NetCDFStore.lock + backends.H5NetCDFStore.open backends.H5NetCDFStore.open_store_variable backends.H5NetCDFStore.prepare_variable backends.H5NetCDFStore.set_attribute diff --git a/doc/combining.rst b/doc/combining.rst index ffc6575c579..adf46c4e0bc 100644 --- a/doc/combining.rst +++ b/doc/combining.rst @@ -244,16 +244,6 @@ in this manner. Combining along multiple dimensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. note:: - - There are currently three combining functions with similar names: - :py:func:`~xarray.auto_combine`, :py:func:`~xarray.combine_by_coords`, and - :py:func:`~xarray.combine_nested`. This is because - ``auto_combine`` is in the process of being deprecated in favour of the other - two functions, which are more general. If your code currently relies on - ``auto_combine``, then you will be able to get similar functionality by using - ``combine_nested``. - For combining many objects along multiple dimensions xarray provides :py:func:`~xarray.combine_nested` and :py:func:`~xarray.combine_by_coords`. 
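With the ``auto_combine`` note above removed, a short editorial sketch of the two surviving functions may help; the ``tile`` helper and the 2x2 grid layout here are hypothetical, not from the patch::

    import numpy as np
    import xarray as xr

    def tile(x0, y0):
        # one hypothetical tile of a larger grid
        return xr.Dataset(
            {"temperature": (("y", "x"), np.random.rand(2, 2))},
            coords={"x": [x0, x0 + 1], "y": [y0, y0 + 1]},
        )

    tiles = [[tile(0, 0), tile(2, 0)], [tile(0, 2), tile(2, 2)]]

    # explicit layout: the outer list concatenates along "y", the inner lists along "x"
    nested = xr.combine_nested(tiles, concat_dim=["y", "x"])

    # the same result, inferred from the coordinate values alone
    by_coords = xr.combine_by_coords([t for row in tiles for t in row])
    assert nested.identical(by_coords)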
These functions use a combination of ``concat`` and ``merge`` across different diff --git a/doc/conf.py b/doc/conf.py index c5a8de694b5..01529b6eb59 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -100,8 +100,58 @@ autosummary_generate = True autodoc_typehints = "none" -napoleon_use_param = True +napoleon_use_param = False napoleon_use_rtype = True +napoleon_preprocess_types = True +napoleon_type_aliases = { + # general terms + "sequence": ":term:`sequence`", + "iterable": ":term:`iterable`", + "callable": ":py:func:`callable`", + "dict_like": ":term:`dict-like <mapping>`", + "dict-like": ":term:`dict-like <mapping>`", + "mapping": ":term:`mapping`", + "file-like": ":term:`file-like <file-like object>`", + # special terms + # "same type as caller": "*same type as caller*", # does not work, yet + # "same type as values": "*same type as values*", # does not work, yet + # stdlib type aliases + "MutableMapping": "~collections.abc.MutableMapping", + "sys.stdout": ":obj:`sys.stdout`", + "timedelta": "~datetime.timedelta", + "string": ":class:`string <str>`", + # numpy terms + "array_like": ":term:`array_like`", + "array-like": ":term:`array-like <array_like>`", + "scalar": ":term:`scalar`", + "array": ":term:`array`", + "hashable": ":term:`hashable <name>`", + # matplotlib terms + "color-like": ":py:func:`color-like <matplotlib.colors.is_color_like>`", + "matplotlib colormap name": ":doc:`matplotlib colormap name <matplotlib:gallery/color/colormap_reference>`", + "matplotlib axes object": ":py:class:`matplotlib axes object <matplotlib.axes.Axes>`", + "colormap": ":py:class:`colormap <matplotlib.colors.Colormap>`", + # objects without namespace + "DataArray": "~xarray.DataArray", + "Dataset": "~xarray.Dataset", + "Variable": "~xarray.Variable", + "ndarray": "~numpy.ndarray", + "MaskedArray": "~numpy.ma.MaskedArray", + "dtype": "~numpy.dtype", + "ComplexWarning": "~numpy.ComplexWarning", + "Index": "~pandas.Index", + "MultiIndex": "~pandas.MultiIndex", + "CategoricalIndex": "~pandas.CategoricalIndex", + "TimedeltaIndex": "~pandas.TimedeltaIndex", + "DatetimeIndex": "~pandas.DatetimeIndex", + "Series": "~pandas.Series", + "DataFrame": "~pandas.DataFrame", + "Categorical": "~pandas.Categorical", + "Path": "~pathlib.Path", + # objects with abbreviated namespace (from pandas) + "pd.Index": "~pandas.Index", + "pd.NaT": "~pandas.NaT", +} numpydoc_class_members_toctree = True numpydoc_show_class_members = False @@ -278,9 +328,9 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). -latex_documents = [ - ("index", "xarray.tex", "xarray Documentation", "xarray Developers", "manual") -] +# latex_documents = [ +# ("index", "xarray.tex", "xarray Documentation", "xarray Developers", "manual") +# ] # The name of an image file (relative to this directory) to place at the top of # the title page. @@ -307,7 +357,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [("index", "xarray", "xarray Documentation", ["xarray Developers"], 1)] +# man_pages = [("index", "xarray", "xarray Documentation", ["xarray Developers"], 1)] # If true, show URL addresses after external links. # man_show_urls = False @@ -318,17 +368,17 @@ # Grouping the document tree into Texinfo files.
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) -texinfo_documents = [ - ( - "index", - "xarray", - "xarray Documentation", - "xarray Developers", - "xarray", - "N-D labeled arrays and datasets in Python.", - "Miscellaneous", - ) -] +# texinfo_documents = [ +# ( +# "index", +# "xarray", +# "xarray Documentation", +# "xarray Developers", +# "xarray", +# "N-D labeled arrays and datasets in Python.", +# "Miscellaneous", +# ) +# ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] diff --git a/doc/dask.rst b/doc/dask.rst index de25ee2200e..4844967350b 100644 --- a/doc/dask.rst +++ b/doc/dask.rst @@ -1,3 +1,5 @@ +.. currentmodule:: xarray + .. _dask: Parallel computing with Dask @@ -90,7 +92,7 @@ use :py:func:`~xarray.open_mfdataset`:: xr.open_mfdataset('my/files/*.nc', parallel=True) This function will automatically concatenate and merge datasets into one in -the simple cases that it understands (see :py:func:`~xarray.auto_combine` +the simple cases that it understands (see :py:func:`~xarray.combine_by_coords` for the full disclaimer). By default, :py:meth:`~xarray.open_mfdataset` will chunk each netCDF file into a single Dask array; again, supply the ``chunks`` argument to control the size of the resulting Dask arrays. In more complex cases, you can diff --git a/doc/terminology.rst b/doc/terminology.rst index ab6d856920a..a85837bafbc 100644 --- a/doc/terminology.rst +++ b/doc/terminology.rst @@ -4,40 +4,103 @@ Terminology =========== -*Xarray terminology differs slightly from CF, mathematical conventions, and pandas; and therefore using xarray, understanding the documentation, and parsing error messages is easier once key terminology is defined. This glossary was designed so that more fundamental concepts come first. Thus for new users, this page is best read top-to-bottom. Throughout the glossary,* ``arr`` *will refer to an xarray* :py:class:`DataArray` *in any small examples. For more complete examples, please consult the relevant documentation.* - ----- - -**DataArray:** A multi-dimensional array with labeled or named dimensions. ``DataArray`` objects add metadata such as dimension names, coordinates, and attributes (defined below) to underlying "unlabeled" data structures such as numpy and Dask arrays. If its optional ``name`` property is set, it is a *named DataArray*. - ----- - -**Dataset:** A dict-like collection of ``DataArray`` objects with aligned dimensions. Thus, most operations that can be performed on the dimensions of a single ``DataArray`` can be performed on a dataset. Datasets have data variables (see **Variable** below), dimensions, coordinates, and attributes. - ----- - -**Variable:** A `NetCDF-like variable `_ consisting of dimensions, data, and attributes which describe a single array. The main functional difference between variables and numpy arrays is that numerical operations on variables implement array broadcasting by dimension name. Each ``DataArray`` has an underlying variable that can be accessed via ``arr.variable``. However, a variable is not fully described outside of either a ``Dataset`` or a ``DataArray``. - -.. note:: - - The :py:class:`Variable` class is low-level interface and can typically be ignored. However, the word "variable" appears often enough in the code and documentation that is useful to understand. - ----- - -**Dimension:** In mathematics, the *dimension* of data is loosely the number of degrees of freedom for it. 
A *dimension axis* is a set of all points in which all but one of these degrees of freedom is fixed. We can think of each dimension axis as having a name, for example the "x dimension". In xarray, a ``DataArray`` object's *dimensions* are its named dimension axes, and the name of the ``i``-th dimension is ``arr.dims[i]``. If an array is created without dimensions, the default dimension names are ``dim_0``, ``dim_1``, and so forth. - ----- - -**Coordinate:** An array that labels a dimension or set of dimensions of another ``DataArray``. In the usual one-dimensional case, the coordinate array's values can loosely be thought of as tick labels along a dimension. There are two types of coordinate arrays: *dimension coordinates* and *non-dimension coordinates* (see below). A coordinate named ``x`` can be retrieved from ``arr.coords[x]``. A ``DataArray`` can have more coordinates than dimensions because a single dimension can be labeled by multiple coordinate arrays. However, only one coordinate array can be a assigned as a particular dimension's dimension coordinate array. As a consequence, ``len(arr.dims) <= len(arr.coords)`` in general. - ----- - -**Dimension coordinate:** A one-dimensional coordinate array assigned to ``arr`` with both a name and dimension name in ``arr.dims``. Dimension coordinates are used for label-based indexing and alignment, like the index found on a :py:class:`pandas.DataFrame` or :py:class:`pandas.Series`. In fact, dimension coordinates use :py:class:`pandas.Index` objects under the hood for efficient computation. Dimension coordinates are marked by ``*`` when printing a ``DataArray`` or ``Dataset``. - ----- - -**Non-dimension coordinate:** A coordinate array assigned to ``arr`` with a name in ``arr.coords`` but *not* in ``arr.dims``. These coordinates arrays can be one-dimensional or multidimensional, and they are useful for auxiliary labeling. As an example, multidimensional coordinates are often used in geoscience datasets when :doc:`the data's physical coordinates (such as latitude and longitude) differ from their logical coordinates `. However, non-dimension coordinates are not indexed, and any operation on non-dimension coordinates that leverages indexing will fail. Printing ``arr.coords`` will print all of ``arr``'s coordinate names, with the corresponding dimension(s) in parentheses. For example, ``coord_name (dim_name) 1 2 3 ...``. - ----- - -**Index:** An *index* is a data structure optimized for efficient selecting and slicing of an associated array. Xarray creates indexes for dimension coordinates so that operations along dimensions are fast, while non-dimension coordinates are not indexed. Under the hood, indexes are implemented as :py:class:`pandas.Index` objects. The index associated with dimension name ``x`` can be retrieved by ``arr.indexes[x]``. By construction, ``len(arr.dims) == len(arr.indexes)`` +*Xarray terminology differs slightly from CF, mathematical conventions, and +pandas; so we've put together a glossary of its terms. Here,* ``arr`` * +refers to an xarray* :py:class:`DataArray` *in the examples. For more +complete examples, please consult the relevant documentation.* + +.. glossary:: + + DataArray + A multi-dimensional array with labeled or named + dimensions. ``DataArray`` objects add metadata such as dimension names, + coordinates, and attributes (defined below) to underlying "unlabeled" + data structures such as numpy and Dask arrays. If its optional ``name`` + property is set, it is a *named DataArray*. 
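As an editorial aside, the ``name`` distinction is easy to see in code; the values below are illustrative only::

    import numpy as np
    import xarray as xr

    unnamed = xr.DataArray(np.zeros((2, 3)), dims=("y", "x"))
    named = xr.DataArray(np.zeros((2, 3)), dims=("y", "x"), name="temperature")

    print(unnamed.name)  # None
    print(named.name)    # temperature

    # only a named DataArray can be promoted to a Dataset without
    # passing a name explicitly
    ds = named.to_dataset()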
+ + Dataset + A dict-like collection of ``DataArray`` objects with aligned + dimensions. Thus, most operations that can be performed on the + dimensions of a single ``DataArray`` can be performed on a + dataset. Datasets have data variables (see **Variable** below), + dimensions, coordinates, and attributes. + + Variable + A `NetCDF-like variable + `_ + consisting of dimensions, data, and attributes which describe a single + array. The main functional difference between variables and numpy arrays + is that numerical operations on variables implement array broadcasting + by dimension name. Each ``DataArray`` has an underlying variable that + can be accessed via ``arr.variable``. However, a variable is not fully + described outside of either a ``Dataset`` or a ``DataArray``. + + .. note:: + + The :py:class:`Variable` class is a low-level interface and can + typically be ignored. However, the word "variable" appears often + enough in the code and documentation that it is useful to understand. + + Dimension + In mathematics, the *dimension* of data is loosely the number of degrees + of freedom for it. A *dimension axis* is a set of all points in which + all but one of these degrees of freedom is fixed. We can think of each + dimension axis as having a name, for example the "x dimension". In + xarray, a ``DataArray`` object's *dimensions* are its named dimension + axes, and the name of the ``i``-th dimension is ``arr.dims[i]``. If an + array is created without dimension names, the default dimension names are + ``dim_0``, ``dim_1``, and so forth. + + Coordinate + An array that labels a dimension or set of dimensions of another + ``DataArray``. In the usual one-dimensional case, the coordinate array's + values can loosely be thought of as tick labels along a dimension. There + are two types of coordinate arrays: *dimension coordinates* and + *non-dimension coordinates* (see below). A coordinate named ``x`` can be + retrieved from ``arr.coords[x]``. A ``DataArray`` can have more + coordinates than dimensions because a single dimension can be labeled by + multiple coordinate arrays. However, only one coordinate array can be + assigned as a particular dimension's dimension coordinate array. As a + consequence, ``len(arr.dims) <= len(arr.coords)`` in general. + + Dimension coordinate + A one-dimensional coordinate array assigned to ``arr`` with both a name + and dimension name in ``arr.dims``. Dimension coordinates are used for + label-based indexing and alignment, like the index found on a + :py:class:`pandas.DataFrame` or :py:class:`pandas.Series`. In fact, + dimension coordinates use :py:class:`pandas.Index` objects under the + hood for efficient computation. Dimension coordinates are marked by + ``*`` when printing a ``DataArray`` or ``Dataset``. + + Non-dimension coordinate + A coordinate array assigned to ``arr`` with a name in ``arr.coords`` but + *not* in ``arr.dims``. These coordinate arrays can be one-dimensional + or multidimensional, and they are useful for auxiliary labeling. As an + example, multidimensional coordinates are often used in geoscience + datasets when :doc:`the data's physical coordinates (such as latitude + and longitude) differ from their logical coordinates + `. However, non-dimension coordinates + are not indexed, and any operation on non-dimension coordinates that + leverages indexing will fail. Printing ``arr.coords`` will print all of + ``arr``'s coordinate names, with the corresponding dimension(s) in + parentheses. For example, ``coord_name (dim_name) 1 2 3 ...``.
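A short sketch (again with made-up values) contrasting the coordinate types just defined::

    import numpy as np
    import xarray as xr

    arr = xr.DataArray(
        np.arange(6).reshape(2, 3),
        dims=("y", "x"),
        coords={
            "x": [10, 20, 30],  # dimension coordinate, backed by a pandas.Index
            "y": [0, 1],        # dimension coordinate
            "lat": (("y", "x"), np.random.rand(2, 3)),  # non-dimension coordinate
        },
    )

    arr.sel(x=20)  # label-based selection along the indexed dimension works
    assert len(arr.dims) <= len(arr.coords)  # 2 dims, 3 coordinate arrays

Selecting by ``lat`` in the same way would fail, because non-dimension coordinates carry no index.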
+ + Index + An *index* is a data structure optimized for efficient selecting and + slicing of an associated array. Xarray creates indexes for dimension + coordinates so that operations along dimensions are fast, while + non-dimension coordinates are not indexed. Under the hood, indexes are + implemented as :py:class:`pandas.Index` objects. The index associated + with dimension name ``x`` can be retrieved by ``arr.indexes[x]``. By + construction, ``len(arr.dims) == len(arr.indexes)`` + + name + The names of dimensions, coordinates, DataArray objects and data + variables can be anything as long as they are :term:`hashable`. However, + it is preferred to use :py:class:`str` typed names. + + scalar + By definition, a scalar is not an :term:`array` and when converted to + one, it has 0 dimensions. That means that, e.g., :py:class:`int`, + :py:class:`float`, and :py:class:`str` objects are "scalar" while + :py:class:`list` or :py:class:`tuple` are not. diff --git a/doc/weather-climate.rst b/doc/weather-climate.rst index 9a92a1842d3..cb2921e2ed1 100644 --- a/doc/weather-climate.rst +++ b/doc/weather-climate.rst @@ -85,7 +85,7 @@ infer the sampling frequency of a :py:class:`~xarray.CFTimeIndex` or a 1-D With :py:meth:`~xarray.CFTimeIndex.strftime` we can also easily generate formatted strings from the datetime values of a :py:class:`~xarray.CFTimeIndex` directly or through the -:py:meth:`~xarray.DataArray.dt` accessor for a :py:class:`~xarray.DataArray` +``dt`` accessor for a :py:class:`~xarray.DataArray` using the same formatting as the standard `datetime.strftime`_ convention . .. _datetime.strftime: https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior @@ -206,6 +206,6 @@ For data indexed by a :py:class:`~xarray.CFTimeIndex` xarray currently supports: and silent errors due to the difference in calendar types between the dates encoded in your data and the dates stored in memory. -.. _Timestamp-valid range: https://pandas.pydata.org/pandas-docs/stable/timeseries.html#timestamp-limitations +.. _Timestamp-valid range: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timestamp-limitations .. _ISO 8601 standard: https://en.wikipedia.org/wiki/ISO_8601 -.. _partial datetime string indexing: https://pandas.pydata.org/pandas-docs/stable/timeseries.html#partial-string-indexing +.. _partial datetime string indexing: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#partial-string-indexing diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 2d6dbaa6ebe..09394448d3f 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -30,10 +30,10 @@ New Features - :py:meth:`~xarray.DataArray.rolling` and :py:meth:`~xarray.Dataset.rolling` now accept more than 1 dimension.(:pull:`4219`) By `Keisuke Fujii `_. -- Build :py:meth:`CFTimeIndex.__repr__` explicitly as :py:class:`pandas.Index`. Add ``calendar`` as a new +- Build ``CFTimeIndex.__repr__`` explicitly as :py:class:`pandas.Index`. Add ``calendar`` as a new property for :py:class:`CFTimeIndex` and show ``calendar`` and ``length`` in - :py:meth:`CFTimeIndex.__repr__` (:issue:`2416`, :pull:`4092`) - `Aaron Spring `_. + ``CFTimeIndex.__repr__`` (:issue:`2416`, :pull:`4092`) + By `Aaron Spring `_. - Relaxed the :ref:`mindeps_policy` to support: - all versions of setuptools released in the last 42 months (but no older than 38.4) @@ -124,7 +124,7 @@ Breaking changes `_. 
(:pull:`3274`) By `Elliott Sales de Andrade `_ -- The old :py:func:`auto_combine` function has now been removed in +- The old ``auto_combine`` function has now been removed in favour of the :py:func:`combine_by_coords` and :py:func:`combine_nested` functions. This also means that the default behaviour of :py:func:`open_mfdataset` has changed to use @@ -138,7 +138,7 @@ New Features ~~~~~~~~~~~~ - :py:meth:`DataArray.argmin` and :py:meth:`DataArray.argmax` now support sequences of 'dim' arguments, and if a sequence is passed return a dict - (which can be passed to :py:meth:`isel` to get the value of the minimum) of + (which can be passed to :py:meth:`DataArray.isel` to get the value of the minimum) of the indices for each dimension of the minimum or maximum of a DataArray. (:pull:`3936`) By `John Omotani `_, thanks to `Keisuke Fujii @@ -1144,7 +1144,7 @@ New functions/methods ``combine_by_coords`` to combine datasets along multiple dimensions, by specifying the argument ``combine='nested'`` or ``combine='by_coords'``. - The older function :py:func:`~xarray.auto_combine` has been deprecated, + The older function ``auto_combine`` has been deprecated, because its functionality has been subsumed by the new functions. To avoid FutureWarnings switch to using ``combine_nested`` or ``combine_by_coords``, (or set the ``combine`` argument in diff --git a/xarray/backends/api.py b/xarray/backends/api.py index 8d7c2230b2d..b84a80c8232 100644 --- a/xarray/backends/api.py +++ b/xarray/backends/api.py @@ -307,7 +307,7 @@ def open_dataset( Parameters ---------- - filename_or_obj : str, Path, file or xarray.backends.*DataStore + filename_or_obj : str, Path, file-like or DataStore Strings and Path objects are interpreted as a path to a netCDF file or an OpenDAP URL and opened with python-netCDF4, unless the filename ends with .gz, in which case the file is gunzipped and opened with @@ -343,16 +343,16 @@ def open_dataset( decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. - engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib', \ - 'pseudonetcdf'}, optional + engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib", \ + "pseudonetcdf"}, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for - 'netcdf4'. + "netcdf4". chunks : int or dict, optional If chunks is provided, it used to load the new dataset into dask arrays. ``chunks={}`` loads the dataset with dask using a single chunk for all arrays. - lock : False or duck threading.Lock, optional + lock : False or lock-like, optional Resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently @@ -364,17 +364,17 @@ def open_dataset( argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. - drop_variables: string or iterable, optional + drop_variables: str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. - backend_kwargs: dictionary, optional + backend_kwargs: dict, optional A dictionary of keyword arguments to pass on to the backend. 
This may be useful when backend options would improve performance or allow user control of dataset processing. use_cftime: bool, optional Only relevant if encoded dates come from a standard calendar - (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not + (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to @@ -384,7 +384,7 @@ def open_dataset( raise an error. decode_timedelta : bool, optional If True, decode variables and coordinates with time units in - {'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'} + {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of decode_time. @@ -578,7 +578,7 @@ def open_dataarray( Parameters ---------- - filename_or_obj : str, Path, file or xarray.backends.*DataStore + filename_or_obj : str, Path, file-like or DataStore Strings and Paths are interpreted as a path to a netCDF file or an OpenDAP URL and opened with python-netCDF4, unless the filename ends with .gz, in which case the file is gunzipped and opened with @@ -610,15 +610,15 @@ def open_dataarray( decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. - engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \ + engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib"}, \ optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for - 'netcdf4'. + "netcdf4". chunks : int or dict, optional If chunks is provided, it used to load the new dataset into dask arrays. - lock : False or duck threading.Lock, optional + lock : False or lock-like, optional Resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently @@ -630,17 +630,17 @@ def open_dataarray( argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. - drop_variables: string or iterable, optional + drop_variables: str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. - backend_kwargs: dictionary, optional + backend_kwargs: dict, optional A dictionary of keyword arguments to pass on to the backend. This may be useful when backend options would improve performance or allow user control of dataset processing. use_cftime: bool, optional Only relevant if encoded dates come from a standard calendar - (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not + (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to @@ -650,7 +650,7 @@ def open_dataarray( raise an error. 
decode_timedelta : bool, optional If True, decode variables and coordinates with time units in - {'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'} + {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of decode_time. @@ -772,77 +772,77 @@ def open_mfdataset( particular dimension. Default is None, which for a 1D list of filepaths is equivalent to opening the files separately and then merging them with ``xarray.merge``. - combine : {'by_coords', 'nested'}, optional + combine : {"by_coords", "nested"}, optional Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to combine all the data. Default is to use ``xarray.combine_by_coords``. - compat : {'identical', 'equals', 'broadcast_equals', - 'no_conflicts', 'override'}, optional + compat : {"identical", "equals", "broadcast_equals", \ + "no_conflicts", "override"}, optional String indicating how to compare variables of the same name for potential conflicts when merging: - * 'broadcast_equals': all values must be equal when variables are + * "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - * 'equals': all values and dimensions must be the same. - * 'identical': all values, dimensions and attributes must be the + * "equals": all values and dimensions must be the same. + * "identical": all values, dimensions and attributes must be the same. - * 'no_conflicts': only values which are not null in both datasets + * "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - * 'override': skip comparing and pick variable from first dataset + * "override": skip comparing and pick variable from first dataset preprocess : callable, optional If provided, call this function on each dataset prior to concatenation. You can find the file-name from which each dataset was loaded in - ``ds.encoding['source']``. - engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \ + ``ds.encoding["source"]``. + engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib"}, \ optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for - 'netcdf4'. - lock : False or duck threading.Lock, optional + "netcdf4". + lock : False or lock-like, optional Resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. - data_vars : {'minimal', 'different', 'all' or list of str}, optional + data_vars : {"minimal", "different", "all"} or list of str, optional These data variables will be concatenated together: - * 'minimal': Only data variables in which the dimension already + * "minimal": Only data variables in which the dimension already appears are included. - * 'different': Data variables which are not equal (ignoring + * "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. - * 'all': All data variables will be concatenated. 
+ * "all": All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in - addition to the 'minimal' data variables. - coords : {'minimal', 'different', 'all' or list of str}, optional + addition to the "minimal" data variables. + coords : {"minimal", "different", "all"} or list of str, optional These coordinate variables will be concatenated together: - * 'minimal': Only coordinates in which the dimension already appears + * "minimal": Only coordinates in which the dimension already appears are included. - * 'different': Coordinates which are not equal (ignoring attributes) + * "different": Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. - * 'all': All coordinate variables will be concatenated, except + * "all": All coordinate variables will be concatenated, except those corresponding to other dimensions. * list of str: The listed coordinate variables will be concatenated, - in addition the 'minimal' coordinates. + in addition the "minimal" coordinates. parallel : bool, optional If True, the open and preprocess steps of this function will be performed in parallel using ``dask.delayed``. Default is False. - join : {'outer', 'inner', 'left', 'right', 'exact, 'override'}, optional + join : {"outer", "inner", "left", "right", "exact, "override"}, optional String indicating how to combine differing indexes (excluding concat_dim) in objects - - 'outer': use the union of object indexes - - 'inner': use the intersection of object indexes - - 'left': use indexes from the first object with each dimension - - 'right': use indexes from the last object with each dimension - - 'exact': instead of aligning, raise `ValueError` when indexes to be + - "outer": use the union of object indexes + - "inner": use the intersection of object indexes + - "left": use indexes from the first object with each dimension + - "right": use indexes from the last object with each dimension + - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - - 'override': if indexes are of same size, rewrite indexes to be + - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. attrs_file : str or pathlib.Path, optional @@ -1142,15 +1142,15 @@ def save_mfdataset( Parameters ---------- - datasets : list of xarray.Dataset + datasets : list of Dataset List of datasets to save. - paths : list of str or list of Paths + paths : list of str or list of Path List of paths to which to save each corresponding dataset. - mode : {'w', 'a'}, optional - Write ('w') or append ('a') mode. If mode='w', any existing file at + mode : {"w", "a"}, optional + Write ("w") or append ("a") mode. If mode="w", any existing file at these locations will be overwritten. - format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', - 'NETCDF3_CLASSIC'}, optional + format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \ + "NETCDF3_CLASSIC"}, optional File format for the resulting netCDF file: @@ -1173,14 +1173,14 @@ def save_mfdataset( NETCDF3_64BIT format (scipy does not support netCDF4). groups : list of str, optional Paths to the netCDF4 group in each corresponding file to which to save - datasets (only works for format='NETCDF4'). 
The groups will be created + datasets (only works for format="NETCDF4"). The groups will be created if necessary. - engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional + engine : {"netcdf4", "scipy", "h5netcdf"}, optional Engine to use when writing netCDF files. If not provided, the default engine is chosen based on available dependencies, with a - preference for 'netcdf4' if writing to a file on disk. + preference for "netcdf4" if writing to a file on disk. See `Dataset.to_netcdf` for additional information. - compute: boolean + compute : bool If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. diff --git a/xarray/backends/file_manager.py b/xarray/backends/file_manager.py index 4967788a1e7..549426b5d07 100644 --- a/xarray/backends/file_manager.py +++ b/xarray/backends/file_manager.py @@ -175,7 +175,8 @@ def acquire(self, needs_lock=True): Returns ------- - An open file object, as returned by ``opener(*args, **kwargs)``. + file-like + An open file object, as returned by ``opener(*args, **kwargs)``. """ file, _ = self._acquire_with_cache_info(needs_lock) return file diff --git a/xarray/backends/zarr.py b/xarray/backends/zarr.py index 540759a1c4c..3c85ae0b976 100644 --- a/xarray/backends/zarr.py +++ b/xarray/backends/zarr.py @@ -189,7 +189,7 @@ def extract_zarr_variable_encoding(variable, raise_on_invalid=False, name=None): Parameters ---------- - variable : xarray.Variable + variable : Variable raise_on_invalid : bool, optional Returns @@ -233,12 +233,12 @@ def encode_zarr_variable(var, needs_copy=True, name=None): Parameters ---------- - var : xarray.Variable + var : Variable A variable holding un-encoded data. Returns ------- - out : xarray.Variable + out : Variable A variable which has been encoded as described above. """ @@ -556,7 +556,7 @@ def open_zarr( decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. - drop_variables : string or iterable, optional + drop_variables : str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. diff --git a/xarray/coding/cftime_offsets.py b/xarray/coding/cftime_offsets.py index a2306331ca7..4e77530dfdb 100644 --- a/xarray/coding/cftime_offsets.py +++ b/xarray/coding/cftime_offsets.py @@ -795,19 +795,19 @@ def cftime_range( Left bound for generating dates. end : str or cftime.datetime, optional Right bound for generating dates. - periods : integer, optional + periods : int, optional Number of periods to generate. - freq : str, default 'D', BaseCFTimeOffset, or None - Frequency strings can have multiples, e.g. '5H'. - normalize : bool, default False + freq : str or None, default: "D" + Frequency strings can have multiples, e.g. "5H". + normalize : bool, default: False Normalize start/end dates to midnight before generating date range. - name : str, default None + name : str, default: None Name of the resulting index - closed : {None, 'left', 'right'}, optional + closed : {"left", "right"} or None, default: None Make the interval closed with respect to the given frequency to the - 'left', 'right', or both sides (None, the default). - calendar : str - Calendar type for the datetimes (default 'standard'). + "left", "right", or both sides (None). + calendar : str, default: "standard" + Calendar type for the datetimes. 
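A usage sketch for the ``cftime_range`` parameters documented above; the printed repr is abbreviated and approximate, and the optional ``cftime`` dependency must be installed::

    import xarray as xr

    times = xr.cftime_range(start="2000-01-01", periods=6, freq="MS", calendar="noleap")
    print(times)
    # CFTimeIndex([2000-01-01 00:00:00, ..., 2000-06-01 00:00:00],
    #             dtype='object', length=6, calendar='noleap')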
Returns ------- @@ -946,7 +946,7 @@ def cftime_range( As in the standard pandas function, three of the ``start``, ``end``, ``periods``, or ``freq`` arguments must be specified at a given time, with the other set to ``None``. See the `pandas documentation - `_ + `_ for more examples of the behavior of ``date_range`` with each of the parameters. diff --git a/xarray/coding/cftimeindex.py b/xarray/coding/cftimeindex.py index 0e1415079fb..85c6ee0809c 100644 --- a/xarray/coding/cftimeindex.py +++ b/xarray/coding/cftimeindex.py @@ -275,7 +275,7 @@ class CFTimeIndex(pd.Index): ---------- data : array or CFTimeIndex Sequence of cftime.datetime objects to use in index - name : str, default None + name : str, default: None Name of the resulting index See Also @@ -698,7 +698,7 @@ def floor(self, freq): Parameters ---------- - freq : str or CFTimeOffset + freq : str The frequency level to round the index to. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases `_ @@ -715,7 +715,7 @@ def ceil(self, freq): Parameters ---------- - freq : str or CFTimeOffset + freq : str The frequency level to round the index to. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases `_ @@ -732,7 +732,7 @@ def round(self, freq): Parameters ---------- - freq : str or CFTimeOffset + freq : str The frequency level to round the index to. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases `_ diff --git a/xarray/coding/frequencies.py b/xarray/coding/frequencies.py index 86f84ba5fbd..fa11d05923f 100644 --- a/xarray/coding/frequencies.py +++ b/xarray/coding/frequencies.py @@ -61,7 +61,7 @@ def infer_freq(index): Parameters ---------- - index : CFTimeIndex, DataArray, pd.DatetimeIndex, pd.TimedeltaIndex, pd.Series + index : CFTimeIndex, DataArray, DatetimeIndex, TimedeltaIndex, Series If not passed a CFTimeIndex, this simply calls `pandas.infer_freq`. If passed a Series or a DataArray will use the values of the series (NOT THE INDEX). diff --git a/xarray/conventions.py b/xarray/conventions.py index 700dcbc0fc4..da5ad7eea85 100644 --- a/xarray/conventions.py +++ b/xarray/conventions.py @@ -230,12 +230,12 @@ def encode_cf_variable(var, needs_copy=True, name=None): Parameters ---------- - var : xarray.Variable + var : Variable A variable holding un-encoded data. Returns ------- - out : xarray.Variable + out : Variable A variable which has been encoded as described above. """ ensure_not_multiindex(var, name=name) @@ -278,28 +278,28 @@ def decode_cf_variable( Parameters ---------- - name: str + name : str Name of the variable. Used for better error messages. var : Variable A variable holding potentially CF encoded information. concat_characters : bool Should character arrays be concatenated to strings, for - example: ['h', 'e', 'l', 'l', 'o'] -> 'hello' - mask_and_scale: bool + example: ["h", "e", "l", "l", "o"] -> "hello" + mask_and_scale : bool Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). If the _Unsigned attribute is present treat integer arrays as unsigned. decode_times : bool - Decode cf times ('hours since 2000-01-01') to np.datetime64. + Decode cf times ("hours since 2000-01-01") to np.datetime64. decode_endianness : bool Decode arrays from non-native to native endianness. stack_char_dim : bool Whether to stack characters into bytes along the last dimension of this array. Passed as an argument because we need to look at the full dataset to figure out if this is appropriate. 
- use_cftime: bool, optional + use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar - (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not + (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to @@ -533,23 +533,23 @@ def decode_cf( Object to decode. concat_characters : bool, optional Should character arrays be concatenated to strings, for - example: ['h', 'e', 'l', 'l', 'o'] -> 'hello' - mask_and_scale: bool, optional + example: ["h", "e", "l", "l", "o"] -> "hello" + mask_and_scale : bool, optional Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). decode_times : bool, optional - Decode cf times (e.g., integers since 'hours since 2000-01-01') to + Decode cf times (e.g., integers since "hours since 2000-01-01") to np.datetime64. decode_coords : bool, optional Use the 'coordinates' attribute on variable (or the dataset itself) to identify coordinates. - drop_variables: string or iterable, optional + drop_variables : str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. - use_cftime: bool, optional + use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar - (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not + (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to @@ -559,7 +559,7 @@ def decode_cf( raise an error. decode_timedelta : bool, optional If True, decode variables and coordinates with time units in - {'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'} + {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of decode_time. @@ -621,12 +621,12 @@ def cf_decoder( A dictionary mapping from attribute name to value concat_characters : bool Should character arrays be concatenated to strings, for - example: ['h', 'e', 'l', 'l', 'o'] -> 'hello' + example: ["h", "e", "l", "l", "o"] -> "hello" mask_and_scale: bool Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). decode_times : bool - Decode cf times ('hours since 2000-01-01') to np.datetime64. + Decode cf times ("hours since 2000-01-01") to np.datetime64. Returns ------- diff --git a/xarray/core/accessor_dt.py b/xarray/core/accessor_dt.py index 630aaee142f..a84da37986e 100644 --- a/xarray/core/accessor_dt.py +++ b/xarray/core/accessor_dt.py @@ -104,9 +104,10 @@ def _round_field(values, name, freq): ---------- values : np.ndarray or dask.array-like Array-like container of datetime-like values - name : str (ceil, floor, round) + name : {"ceil", "floor", "round"} Name of rounding function - freq : a freq string indicating the rounding resolution + freq : str + a freq string indicating the rounding resolution Returns ------- @@ -190,8 +191,8 @@ def floor(self, freq): Parameters ---------- - freq : a freq string indicating the rounding resolution - e.g. 'D' for daily resolution + freq : str + a freq string indicating the rounding resolution e.g. 
"D" for daily resolution Returns ------- @@ -207,8 +208,8 @@ def ceil(self, freq): Parameters ---------- - freq : a freq string indicating the rounding resolution - e.g. 'D' for daily resolution + freq : str + a freq string indicating the rounding resolution e.g. "D" for daily resolution Returns ------- @@ -223,8 +224,8 @@ def round(self, freq): Parameters ---------- - freq : a freq string indicating the rounding resolution - e.g. 'D' for daily resolution + freq : str + a freq string indicating the rounding resolution e.g. "D" for daily resolution Returns ------- diff --git a/xarray/core/accessor_str.py b/xarray/core/accessor_str.py index 1f0c95af71e..a845ce1f642 100644 --- a/xarray/core/accessor_str.py +++ b/xarray/core/accessor_str.py @@ -118,7 +118,7 @@ def get(self, i, default=""): Returns ------- - items : array of objects + items : array of object """ s = slice(-1, None) if i == -1 else slice(i, i + 1) @@ -344,15 +344,15 @@ def count(self, pat, flags=0): This function is used to count the number of times a particular regex pattern is repeated in each of the string elements of the - :class:`~xarray.DatArray`. + :class:`~xarray.DataArray`. Parameters ---------- pat : str Valid regular expression. - flags : int, default 0, meaning no flags - Flags for the `re` module. For a complete list, `see here - `_. + flags : int, default: 0 + Flags for the `re` module. Use 0 for no flags. For a complete list, + `see here `_. Returns ------- @@ -410,9 +410,9 @@ def pad(self, width, side="left", fillchar=" "): width : int Minimum width of resulting string; additional characters will be filled with character defined in `fillchar`. - side : {'left', 'right', 'both'}, default 'left' + side : {"left", "right", "both"}, default: "left" Side from which to fill resulting string. - fillchar : str, default ' ' + fillchar : str, default: " " Additional character for filling, default is whitespace. Returns @@ -445,7 +445,7 @@ def center(self, width, fillchar=" "): width : int Minimum width of resulting string; additional characters will be filled with ``fillchar`` - fillchar : str + fillchar : str, default: " " Additional character for filling, default is whitespace Returns @@ -463,7 +463,7 @@ def ljust(self, width, fillchar=" "): width : int Minimum width of resulting string; additional characters will be filled with ``fillchar`` - fillchar : str + fillchar : str, default: " " Additional character for filling, default is whitespace Returns @@ -481,7 +481,7 @@ def rjust(self, width, fillchar=" "): width : int Minimum width of resulting string; additional characters will be filled with ``fillchar`` - fillchar : str + fillchar : str, default: " " Additional character for filling, default is whitespace Returns @@ -521,11 +521,12 @@ def contains(self, pat, case=True, flags=0, regex=True): ---------- pat : str Character sequence or regular expression. - case : bool, default True + case : bool, default: True If True, case sensitive. - flags : int, default 0 (no flags) + flags : int, default: 0 Flags to pass through to the re module, e.g. re.IGNORECASE. - regex : bool, default True + ``0`` means no flags. + regex : bool, default: True If True, assumes the pat is a regular expression. If False, treats the pat as a literal string. 
@@ -562,12 +563,12 @@ def match(self, pat, case=True, flags=0): Parameters ---------- - pat : string + pat : str Character sequence or regular expression - case : boolean, default True + case : bool, default: True If True, case sensitive - flags : int, default 0 (no flags) - re module flags, e.g. re.IGNORECASE + flags : int, default: 0 + re module flags, e.g. re.IGNORECASE. ``0`` means no flags Returns ------- @@ -590,11 +591,11 @@ def strip(self, to_strip=None, side="both"): Parameters ---------- - to_strip : str or None, default None + to_strip : str or None, default: None Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. If None then whitespaces are removed. - side : {'left', 'right', 'both'}, default 'left' + side : {"left", "right", "both"}, default: "both" Side from which to strip. Returns ------- @@ -624,7 +625,7 @@ def lstrip(self, to_strip=None): Parameters ---------- - to_strip : str or None, default None + to_strip : str or None, default: None Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. If None then whitespaces are removed. @@ -644,7 +645,7 @@ def rstrip(self, to_strip=None): Parameters ---------- - to_strip : str or None, default None + to_strip : str or None, default: None Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. If None then whitespaces are removed. @@ -727,12 +728,12 @@ def find(self, sub, start=0, end=None, side="left"): Left edge index end : int Right edge index - side : {'left', 'right'}, default 'left' + side : {"left", "right"}, default: "left" Starting side for search. Returns ------- - found : array of integer values + found : array of int """ sub = self._obj.dtype.type(sub) @@ -767,7 +768,7 @@ Returns ------- - found : array of integer values + found : array of int """ return self.find(sub, start=start, end=end, side="right") @@ -786,12 +787,12 @@ def index(self, sub, start=0, end=None, side="left"): Left edge index end : int Right edge index - side : {'left', 'right'}, default 'left' + side : {"left", "right"}, default: "left" Starting side for search. Returns ------- - found : array of integer values + found : array of int """ sub = self._obj.dtype.type(sub) @@ -827,7 +828,7 @@ Returns ------- - found : array of integer values + found : array of int """ return self.index(sub, start=start, end=end, side="right") @@ -837,22 +838,22 @@ def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True): Parameters ---------- - pat : string or compiled regex + pat : str or re.Pattern String can be a character sequence or regular expression. - repl : string or callable + repl : str or callable Replacement string or a callable. The callable is passed the regex match object and must return a replacement string to be used. See :func:`re.sub`. - n : int, default -1 (all) - Number of replacements to make from start - case : boolean, default None + n : int, default: -1 + Number of replacements to make from start. Use ``-1`` to replace all. + case : bool, default: None - If True, case sensitive (the default if `pat` is a string) - Set to False for case insensitive - Cannot be set if `pat` is a compiled regex - flags : int, default 0 (no flags) - - re module flags, e.g. re.IGNORECASE + flags : int, default: 0 + - re module flags, e.g. re.IGNORECASE. Use ``0`` for no flags.
- Cannot be set if `pat` is a compiled regex - regex : boolean, default True + regex : bool, default: True - If True, assumes the passed-in pattern is a regular expression. - If False, treats the pattern as a literal string - Cannot be set to False if `pat` is a compiled regex or `repl` is diff --git a/xarray/core/alignment.py b/xarray/core/alignment.py index abc180e049c..a7fcdc280ff 100644 --- a/xarray/core/alignment.py +++ b/xarray/core/alignment.py @@ -80,17 +80,17 @@ def align( ---------- *objects : Dataset or DataArray Objects to align. - join : {'outer', 'inner', 'left', 'right', 'exact', 'override'}, optional + join : {"outer", "inner", "left", "right", "exact", "override"}, optional Method for joining the indexes of the passed objects along each dimension: - - 'outer': use the union of object indexes - - 'inner': use the intersection of object indexes - - 'left': use indexes from the first object with each dimension - - 'right': use indexes from the last object with each dimension - - 'exact': instead of aligning, raise `ValueError` when indexes to be + - "outer": use the union of object indexes + - "inner": use the intersection of object indexes + - "left": use indexes from the first object with each dimension + - "right": use indexes from the last object with each dimension + - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - - 'override': if indexes are of same size, rewrite indexes to be + - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. copy : bool, optional @@ -108,8 +108,9 @@ def align( Returns ------- - aligned : same as `*objects` - Tuple of objects with aligned coordinates. + aligned : DataArray or Dataset + Tuple of objects with the same type as `*objects` with aligned + coordinates. Raises ------ @@ -664,14 +665,14 @@ def broadcast(*args, exclude=None): Parameters ---------- - *args : DataArray or Dataset objects + *args : DataArray or Dataset Arrays to broadcast against each other. exclude : sequence of str, optional Dimensions that must not be broadcasted Returns ------- - broadcast : tuple of xarray objects + broadcast : tuple of DataArray or tuple of Dataset The same data as the input arrays, but with additional dimensions inserted so that all data arrays have the same dimensions and shape. diff --git a/xarray/core/combine.py b/xarray/core/combine.py index 58bd7178fa2..ed582cc563f 100644 --- a/xarray/core/combine.py +++ b/xarray/core/combine.py @@ -362,7 +362,7 @@ def combine_nested( Parameters ---------- - datasets : list or nested list of xarray.Dataset objects. + datasets : list or nested list of Dataset Dataset objects to combine. If concatenation or merging along more than one dimension is desired, then datasets must be supplied in a nested list-of-lists. @@ -375,48 +375,48 @@ def combine_nested( nested-list input along which to merge. Must be the same length as the depth of the list passed to ``datasets``. - compat : {'identical', 'equals', 'broadcast_equals', - 'no_conflicts', 'override'}, optional + compat : {"identical", "equals", "broadcast_equals", \ + "no_conflicts", "override"}, optional String indicating how to compare variables of the same name for potential merge conflicts: - - 'broadcast_equals': all values must be equal when variables are + - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. 
- - 'equals': all values and dimensions must be the same. - - 'identical': all values, dimensions and attributes must be the + - "equals": all values and dimensions must be the same. + - "identical": all values, dimensions and attributes must be the same. - - 'no_conflicts': only values which are not null in both datasets + - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - - 'override': skip comparing and pick variable from first dataset - data_vars : {'minimal', 'different', 'all' or list of str}, optional + - "override": skip comparing and pick variable from first dataset + data_vars : {"minimal", "different", "all" or list of str}, optional Details are in the documentation of concat - coords : {'minimal', 'different', 'all' or list of str}, optional + coords : {"minimal", "different", "all" or list of str}, optional Details are in the documentation of concat fill_value : scalar, optional Value to use for newly missing values - join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes (excluding concat_dim) in objects - - 'outer': use the union of object indexes - - 'inner': use the intersection of object indexes - - 'left': use indexes from the first object with each dimension - - 'right': use indexes from the last object with each dimension - - 'exact': instead of aligning, raise `ValueError` when indexes to be + - "outer": use the union of object indexes + - "inner": use the intersection of object indexes + - "left": use indexes from the first object with each dimension + - "right": use indexes from the last object with each dimension + - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - - 'override': if indexes are of same size, rewrite indexes to be + - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. - combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, - default 'drop' + combine_attrs : {"drop", "identical", "no_conflicts", "override"}, \ + default: "drop" String indicating how to combine attrs of the objects being merged: - - 'drop': empty attrs on returned Dataset. - - 'identical': all attrs must be the same on every object. - - 'no_conflicts': attrs from all objects are combined, any that have + - "drop": empty attrs on returned Dataset. + - "identical": all attrs must be the same on every object. + - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - - 'override': skip comparing and copy attrs from the first dataset to + - "override": skip comparing and copy attrs from the first dataset to the result. Returns @@ -541,61 +541,61 @@ def combine_by_coords( ---------- datasets : sequence of xarray.Dataset Dataset objects to combine. - compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional + compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional String indicating how to compare variables of the same name for potential conflicts: - - 'broadcast_equals': all values must be equal when variables are + - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. 
- - 'equals': all values and dimensions must be the same. - - 'identical': all values, dimensions and attributes must be the + - "equals": all values and dimensions must be the same. + - "identical": all values, dimensions and attributes must be the same. - - 'no_conflicts': only values which are not null in both datasets + - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - - 'override': skip comparing and pick variable from first dataset - data_vars : {'minimal', 'different', 'all' or list of str}, optional + - "override": skip comparing and pick variable from first dataset + data_vars : {"minimal", "different", "all" or list of str}, optional These data variables will be concatenated together: - * 'minimal': Only data variables in which the dimension already + * "minimal": Only data variables in which the dimension already appears are included. - * 'different': Data variables which are not equal (ignoring + * "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. - * 'all': All data variables will be concatenated. + * "all": All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in - addition to the 'minimal' data variables. + addition to the "minimal" data variables. - If objects are DataArrays, `data_vars` must be 'all'. - coords : {'minimal', 'different', 'all' or list of str}, optional - As per the 'data_vars' kwarg, but for coordinate variables. + If objects are DataArrays, `data_vars` must be "all". + coords : {"minimal", "different", "all" or list of str}, optional + As per the "data_vars" kwarg, but for coordinate variables. fill_value : scalar, optional Value to use for newly missing values. If None, raises a ValueError if the passed Datasets do not create a complete hypercube. - join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes (excluding concat_dim) in objects - - 'outer': use the union of object indexes - - 'inner': use the intersection of object indexes - - 'left': use indexes from the first object with each dimension - - 'right': use indexes from the last object with each dimension - - 'exact': instead of aligning, raise `ValueError` when indexes to be + - "outer": use the union of object indexes + - "inner": use the intersection of object indexes + - "left": use indexes from the first object with each dimension + - "right": use indexes from the last object with each dimension + - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - - 'override': if indexes are of same size, rewrite indexes to be + - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. - combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, - default 'drop' + combine_attrs : {"drop", "identical", "no_conflicts", "override"}, \ + default: "drop" String indicating how to combine attrs of the objects being merged: - - 'drop': empty attrs on returned Dataset. - - 'identical': all attrs must be the same on every object. 
- - 'no_conflicts': attrs from all objects are combined, any that have + - "drop": empty attrs on returned Dataset. + - "identical": all attrs must be the same on every object. + - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - - 'override': skip comparing and copy attrs from the first dataset to + - "override": skip comparing and copy attrs from the first dataset to the result. Returns diff --git a/xarray/core/common.py b/xarray/core/common.py index bc5035b682e..4207aea3a25 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -408,7 +408,7 @@ def assign_coords(self, coords=None, **coords_kwargs): the first element the dimension name and the second element the values for this new coordinate. - **coords_kwargs : keyword, value pairs, optional + **coords_kwargs : optional The keyword arguments form of ``coords``. One of ``coords`` or ``coords_kwargs`` must be provided. @@ -484,8 +484,10 @@ def assign_attrs(self, *args, **kwargs): Parameters ---------- - args : positional arguments passed into ``attrs.update``. - kwargs : keyword arguments passed into ``attrs.update``. + args + positional arguments passed into ``attrs.update``. + kwargs + keyword arguments passed into ``attrs.update``. Returns ------- @@ -513,18 +515,21 @@ def pipe( Parameters ---------- - func : function + func : callable function to apply to this xarray object (Dataset/DataArray). ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the xarray object. - args : positional arguments passed into ``func``. - kwargs : a dictionary of keyword arguments passed into ``func``. + args + positional arguments passed into ``func``. + kwargs + a dictionary of keyword arguments passed into ``func``. Returns ------- - object : the return type of ``func``. + object : Any + the return type of ``func``. Notes ----- @@ -635,7 +640,7 @@ def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None): group : str, DataArray or IndexVariable Array whose unique values should be used to group this array. If a string, must be the name of a variable contained in this dataset. - squeeze : boolean, optional + squeeze : bool, optional If "group" is a dimension of any arrays in this dataset, `squeeze` controls whether the subarrays have a dimension of length 1 along that dimension or if the dimension is squeezed out. @@ -645,7 +650,7 @@ def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None): Returns ------- - grouped : GroupBy + grouped A `GroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. @@ -711,17 +716,17 @@ def groupby_bins( group : str, DataArray or IndexVariable Array whose binned values should be used to group this array. If a string, must be the name of a variable contained in this dataset. - bins : int or array of scalars + bins : int or array-like If bins is an int, it defines the number of equal-width bins in the range of x. However, in this case, the range of x is extended by .1% on each side to include the min or max values of x. If bins is a sequence it defines the bin edges allowing for non-uniform bin width. No extension of the range of x is done in this case. - right : boolean, optional + right : bool, default: True Indicates whether the bins include the rightmost edge or not. 
If right == True (the default), then the bins [1,2,3,4] indicate (1,2], (2,3], (3,4]. - labels : array or boolean, default None + labels : array-like or bool, default: None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, string bin labels are assigned by `pandas.cut`. @@ -729,7 +734,7 @@ def groupby_bins( The precision at which to store and display the bins labels. include_lowest : bool Whether the first interval should be left-inclusive or not. - squeeze : boolean, optional + squeeze : bool, default: True If "group" is a dimension of any arrays in this dataset, `squeeze` controls whether the subarrays have a dimension of length 1 along that dimension or if the dimension is squeezed out. @@ -739,7 +744,7 @@ def groupby_bins( Returns ------- - grouped : GroupBy + grouped A `GroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. The name of the group has the added suffix `_bins` in order to @@ -798,11 +803,11 @@ def rolling( dim: dict, optional Mapping from the dimension name to create the rolling iterator along (e.g. `time`) to its moving window size. - min_periods : int, default None + min_periods : int, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. - center : boolean, or a mapping, default False + center : bool or mapping, default: False Set the labels at the center of the window. keep_attrs : bool, optional If True, the object's attributes (`attrs`) will be copied from @@ -876,20 +881,13 @@ def rolling_exp( Parameters ---------- - window : A single mapping from a dimension name to window value, - optional - - dim : str - Name of the dimension to create the rolling exponential window - along (e.g., `time`). - window : int - Size of the moving window. The type of this is specified in - `window_type` - window_type : str, one of ['span', 'com', 'halflife', 'alpha'], - default 'span' + window : mapping of hashable to int, optional + A mapping from the name of the dimension to create the rolling + exponential window along (e.g. `time`) to the size of the moving window. + window_type : {"span", "com", "halflife", "alpha"}, default: "span" The format of the previously supplied window. Each is a simple numerical transformation of the others. Described in detail: - https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html + https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.ewm.html **window_kwargs : optional The keyword arguments form of ``window``. One of window or window_kwargs must be provided. @@ -916,20 +914,15 @@ def coarsen( Parameters ---------- - dim: dict, optional + dim : mapping of hashable to int, optional Mapping from the dimension name to the window size. - - dim : str - Name of the dimension to create the rolling iterator - along (e.g., `time`). - window : int - Size of the moving window. - boundary : 'exact' | 'trim' | 'pad' + boundary : {"exact", "trim", "pad"}, default: "exact" If 'exact', a ValueError will be raised if dimension size is not a multiple of the window size. If 'trim', the excess entries are dropped. If 'pad', NA will be padded. 
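As an illustration of the rolling-window behaviour documented above, a minimal sketch (the array, dimension name, and window size here are hypothetical):

.. code:: python

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.arange(10.0), dims="time")

    # centered window of size 3; positions with fewer than
    # 2 observations in the window are left as NaN
    smoothed = da.rolling(time=3, center=True, min_periods=2).mean()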
- side : 'left' or 'right' or mapping from dimension to 'left' or 'right' - coord_func : function (name) that is applied to the coordinates, + side : {"left", "right"} or mapping of str to {"left", "right"} + coord_func : str or mapping of hashable to str, default: "mean" + function (name) that is applied to the coordinates, or a mapping from coordinate name to function (name). keep_attrs : bool, optional If True, the object's attributes (`attrs`) will be copied from @@ -1009,13 +1002,13 @@ def resample( dimension must be datetime-like. skipna : bool, optional Whether to skip missing values when aggregating in downsampling. - closed : 'left' or 'right', optional + closed : {"left", "right"}, optional Side of each interval to treat as closed. - label : 'left or 'right', optional + label : {"left", "right"}, optional Side of each interval to use for labeling. base : int, optional For frequencies that evenly subdivide 1 day, the "origin" of the - aggregated intervals. For example, for '24H' frequency, base could + aggregated intervals. For example, for "24H" frequency, base could range from 0 through 23. loffset : timedelta or str, optional Offset used to adjust the resampled time labels. Some pandas date @@ -1145,19 +1138,20 @@ def where(self, cond, other=dtypes.NA, drop: bool = False): Parameters ---------- - cond : DataArray or Dataset with boolean dtype - Locations at which to preserve this object's values. + cond : DataArray or Dataset + Locations at which to preserve this object's values. dtype must be `bool`. other : scalar, DataArray or Dataset, optional Value to use for locations in this object where ``cond`` is False. By default, these locations filled with NA. - drop : boolean, optional + drop : bool, optional If True, coordinate labels that only correspond to False values of the condition are dropped from the result. Mutually exclusive with ``other``. Returns ------- - Same xarray type as caller, with dtype float64. + DataArray or Dataset + Same xarray type as caller, with dtype float64. Examples -------- @@ -1266,8 +1260,8 @@ def isin(self, test_elements): Returns ------- - isin : same as object, bool - Has the same shape as this object. + isin : DataArray or Dataset + Has the same type and shape as this object, but with a bool dtype. Examples -------- @@ -1321,7 +1315,7 @@ def full_like(other, fill_value, dtype: DTypeLike = None): Parameters ---------- - other : DataArray, Dataset, or Variable + other : DataArray, Dataset or Variable The reference object in input fill_value : scalar Value to fill the new object with before returning it. @@ -1445,14 +1439,14 @@ def zeros_like(other, dtype: DTypeLike = None): Parameters ---------- - other : DataArray, Dataset, or Variable + other : DataArray, Dataset or Variable The reference object. The output will have the same dimensions and coordinates as this object. dtype : dtype, optional dtype of the new array. If omitted, it defaults to other.dtype. Returns ------- - out : same as object + out : DataArray, Dataset or Variable New object of zeros with the same shape and type as other. Examples diff --git a/xarray/core/computation.py b/xarray/core/computation.py index 9846b23ee6c..353aefb0e73 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -182,7 +182,7 @@ def build_output_coords( are OK, e.g., scalars, Variable, DataArray, Dataset. signature : _UfuncSignature Core dimensions signature for the operation. - exclude_dims : optional set + exclude_dims : set, optional Dimensions excluded from the operation. 
Coordinates along these dimensions are dropped. @@ -756,9 +756,9 @@ def apply_ufunc( the style of NumPy universal functions [1]_ (if this is not the case, set ``vectorize=True``). If this function returns multiple outputs, you must set ``output_core_dims`` as well. - *args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars + *args : Dataset, DataArray, GroupBy, Variable, numpy.ndarray, dask.array.Array or scalar Mix of labeled and/or unlabeled arrays to which to apply the function. - input_core_dims : Sequence[Sequence], optional + input_core_dims : sequence of sequence, optional List of the same length as ``args`` giving the list of core dimensions on each input argument that should not be broadcast. By default, we assume there are no core dimensions on any input arguments. @@ -770,7 +770,7 @@ def apply_ufunc( Core dimensions are automatically moved to the last axes of input variables before applying ``func``, which facilitates using NumPy style generalized ufuncs [2]_. - output_core_dims : List[tuple], optional + output_core_dims : list of tuple, optional List of the same length as the number of output arguments from ``func``, giving the list of core dimensions on each output that were not broadcast on the inputs. By default, we assume that ``func`` @@ -791,7 +791,7 @@ def apply_ufunc( :py:func:`numpy.vectorize`. This option exists for convenience, but is almost always slower than supplying a pre-vectorized function. Using this option requires NumPy version 1.12 or newer. - join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + join : {"outer", "inner", "left", "right", "exact"}, default: "exact" Method for joining the indexes of the passed objects along each dimension, and the variables of Dataset objects with mismatched data variables: @@ -802,7 +802,7 @@ def apply_ufunc( - 'right': use indexes from the last object with each dimension - 'exact': raise `ValueError` instead of aligning when indexes to be aligned are not equal - dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + dataset_join : {"outer", "inner", "left", "right", "exact"}, default: "exact" Method for joining variables of Dataset objects with mismatched data variables. @@ -815,11 +815,11 @@ def apply_ufunc( Value used in place of missing variables on Dataset inputs when the datasets do not share the exact same ``data_vars``. Required if ``dataset_join not in {'inner', 'exact'}``, otherwise ignored. - keep_attrs: boolean, Optional + keep_attrs: bool, optional Whether to copy attributes from the first argument to the output. kwargs: dict, optional Optional keyword arguments passed directly on to call ``func``. - dask: 'forbidden', 'allowed' or 'parallelized', optional + dask: {"forbidden", "allowed", "parallelized"}, default: "forbidden" How to handle applying to objects containing lazy data in the form of dask arrays: @@ -834,7 +834,7 @@ def apply_ufunc( Optional keyword arguments passed to ``dask.array.apply_gufunc`` if dask='parallelized'. Possible keywords are ``output_sizes``, ``allow_rechunk`` and ``meta``. - output_dtypes : list of dtypes, optional + output_dtypes : list of dtype, optional Optional list of output dtypes. Only used if ``dask='parallelized'`` or vectorize=True. output_sizes : dict, optional @@ -1075,9 +1075,9 @@ def cov(da_a, da_b, dim=None, ddof=1): Parameters ---------- - da_a: DataArray object + da_a: DataArray Array to compute. - da_b: DataArray object + da_b: DataArray Array to compute. 
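To make the core-dimension machinery of ``apply_ufunc`` concrete, a minimal sketch (the array shape and names are hypothetical): the core dimension ``time`` is moved to the last axis before ``func`` is applied, and is consumed by the reduction.

.. code:: python

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.random.rand(4, 3), dims=("x", "time"))

    def mean_over_last_axis(arr):
        # plain NumPy function; the core dimension arrives as the last axis
        return arr.mean(axis=-1)

    # "time" is a core dimension of the input and is removed from the output
    result = xr.apply_ufunc(mean_over_last_axis, da, input_core_dims=[["time"]])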
dim : str, optional The dimension along which the covariance will be computed @@ -1155,9 +1155,9 @@ def corr(da_a, da_b, dim=None): Parameters ---------- - da_a: DataArray object + da_a: DataArray Array to compute. - da_b: DataArray object + da_b: DataArray Array to compute. dim: str, optional The dimension along which the correlation will be computed @@ -1269,18 +1269,18 @@ def dot(*arrays, dims=None, **kwargs): Parameters ---------- - arrays: DataArray (or Variable) objects + arrays : DataArray or Variable Arrays to compute. - dims: '...', str or tuple of strings, optional + dims : ..., str or tuple of str, optional Which dimensions to sum over. Ellipsis ('...') sums over all dimensions. If not specified, then all the common dimensions are summed over. - **kwargs: dict + **kwargs : dict Additional keyword arguments passed to numpy.einsum or dask.array.einsum Returns ------- - dot: DataArray + DataArray Examples -------- @@ -1414,22 +1414,24 @@ def where(cond, x, y): Performs xarray-like broadcasting across input arguments. + All dimension coordinates on `x` and `y` must be aligned with each + other and with `cond`. + + Parameters ---------- - cond : scalar, array, Variable, DataArray or Dataset with boolean dtype + cond : scalar, array, Variable, DataArray or Dataset When True, return values from `x`, otherwise returns values from `y`. x : scalar, array, Variable, DataArray or Dataset values to choose from where `cond` is True y : scalar, array, Variable, DataArray or Dataset values to choose from where `cond` is False - All dimension coordinates on these objects must be aligned with each - other and with `cond`. - Returns ------- - In priority order: Dataset, DataArray, Variable or array, whichever - type appears as an input argument. + Dataset, DataArray, Variable or array + In priority order: Dataset, DataArray, Variable or array, whichever + type appears as an input argument. Examples -------- @@ -1511,7 +1513,7 @@ def polyval(coord, coeffs, degree_dim="degree"): The 1D coordinate along which to evaluate the polynomial. coeffs : DataArray Coefficients of the polynomials. - degree_dim : str, default "degree" + degree_dim : str, default: "degree" Name of the polynomial degree dimension in `coeffs`. See also diff --git a/xarray/core/concat.py b/xarray/core/concat.py index b42c91c232d..fa3fac92277 100644 --- a/xarray/core/concat.py +++ b/xarray/core/concat.py @@ -23,7 +23,7 @@ def concat( Parameters ---------- - objs : sequence of Dataset and DataArray objects + objs : sequence of Dataset and DataArray xarray objects to concatenate together. Each object is expected to consist of variables and coordinates with matching shapes except for along the concatenated dimension. @@ -34,74 +34,74 @@ def concat( unchanged. If dimension is provided as a DataArray or Index, its name is used as the dimension to concatenate along and the values are added as a coordinate. - data_vars : {'minimal', 'different', 'all' or list of str}, optional + data_vars : {"minimal", "different", "all"} or list of str, optional These data variables will be concatenated together: - * 'minimal': Only data variables in which the dimension already + * "minimal": Only data variables in which the dimension already appears are included. - * 'different': Data variables which are not equal (ignoring + * "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). 
Beware: this option may load the data payload of data variables into memory if they are not already loaded. - * 'all': All data variables will be concatenated. + * "all": All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in - addition to the 'minimal' data variables. + addition to the "minimal" data variables. - If objects are DataArrays, data_vars must be 'all'. - coords : {'minimal', 'different', 'all' or list of str}, optional + If objects are DataArrays, data_vars must be "all". + coords : {"minimal", "different", "all"} or list of str, optional These coordinate variables will be concatenated together: - * 'minimal': Only coordinates in which the dimension already appears + * "minimal": Only coordinates in which the dimension already appears are included. - * 'different': Coordinates which are not equal (ignoring attributes) + * "different": Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. - * 'all': All coordinate variables will be concatenated, except + * "all": All coordinate variables will be concatenated, except those corresponding to other dimensions. * list of str: The listed coordinate variables will be concatenated, - in addition to the 'minimal' coordinates. - compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional + in addition to the "minimal" coordinates. + compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional String indicating how to compare non-concatenated variables of the same name for potential conflicts. This is passed down to merge. - - 'broadcast_equals': all values must be equal when variables are + - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - - 'equals': all values and dimensions must be the same. - - 'identical': all values, dimensions and attributes must be the + - "equals": all values and dimensions must be the same. + - "identical": all values, dimensions and attributes must be the same. - - 'no_conflicts': only values which are not null in both datasets + - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - - 'override': skip comparing and pick variable from first dataset + - "override": skip comparing and pick variable from first dataset positions : None or list of integer arrays, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. 
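For example, concatenating two datasets along a shared dimension; a minimal sketch with hypothetical data:

.. code:: python

    import xarray as xr

    ds1 = xr.Dataset({"a": ("x", [1, 2])}, coords={"x": [0, 1]})
    ds2 = xr.Dataset({"a": ("x", [3, 4])}, coords={"x": [2, 3]})

    # objects are concatenated in the order given; "x" becomes length 4
    combined = xr.concat([ds1, ds2], dim="x")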
fill_value : scalar, optional Value to use for newly missing values - join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes (excluding dim) in objects - - 'outer': use the union of object indexes - - 'inner': use the intersection of object indexes - - 'left': use indexes from the first object with each dimension - - 'right': use indexes from the last object with each dimension - - 'exact': instead of aligning, raise `ValueError` when indexes to be + - "outer": use the union of object indexes + - "inner": use the intersection of object indexes + - "left": use indexes from the first object with each dimension + - "right": use indexes from the last object with each dimension + - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - - 'override': if indexes are of same size, rewrite indexes to be + - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. - combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, - default 'override + combine_attrs : {"drop", "identical", "no_conflicts", "override"}, \ + default: "override" String indicating how to combine attrs of the objects being merged: - - 'drop': empty attrs on returned Dataset. - - 'identical': all attrs must be the same on every object. - - 'no_conflicts': attrs from all objects are combined, any that have + - "drop": empty attrs on returned Dataset. + - "identical": all attrs must be the same on every object. + - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - - 'override': skip comparing and copy attrs from the first dataset to + - "override": skip comparing and copy attrs from the first dataset to the result. Returns diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 4ad29baee04..426329e6a6e 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -282,7 +282,7 @@ def __init__( object, attempts are made to use this array's metadata to fill in other unspecified arguments. A view of the array's data is used instead of a copy if possible. - coords : sequence or dict of array_like objects, optional + coords : sequence or dict of array_like, optional Coordinates (tick labels) to use for indexing along each dimension. The following notations are accepted: @@ -492,7 +492,7 @@ def to_dataset( name : hashable, optional Name to substitute for this array's name. Only valid if ``dim`` is not provided. - promote_attrs : bool, default False + promote_attrs : bool, default: False Set to True to shallow copy attrs of DataArray to returned Dataset. Returns @@ -731,7 +731,7 @@ def reset_coords( Parameters ---------- - names : hashable or iterable of hashables, optional + names : hashable or iterable of hashable, optional Name(s) of non-index coordinates in this dataset to reset into variables. By default, all non-index coordinates are reset. drop : bool, optional @@ -979,7 +979,7 @@ def chunk( Parameters ---------- - chunks : int, tuple or mapping, optional + chunks : int, tuple of int or mapping of hashable to int, optional Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or ``{'x': 5, 'y': 5}``. 
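A minimal sketch of the chunking call described above (the sizes are hypothetical and a dask installation is assumed):

.. code:: python

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.zeros((100, 100)), dims=("x", "y"))

    # returns a lazy, dask-backed copy split into 50x50 blocks
    chunked = da.chunk({"x": 50, "y": 50})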
name_prefix : str, optional @@ -1024,7 +1024,7 @@ def isel( drop : bool, optional If ``drop=True``, drop coordinates variables indexed by integers instead of making them scalar. - missing_dims : {"raise", "warn", "ignore"}, default "raise" + missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "raise": raise an exception @@ -1111,7 +1111,7 @@ def sel( If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. - method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional + method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for inexact matches: * None (default): only exact matches @@ -1231,7 +1231,8 @@ def broadcast_like( Returns ------- - new_da: xr.DataArray + new_da : DataArray + The caller broadcasted against ``other``. Examples -------- @@ -1290,7 +1291,7 @@ def reindex_like( other object need not be the same as the indexes on this dataset. Any mis-matched index values will be filled in with NaN, and any mis-matched dimension names will simply be ignored. - method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional + method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for filling index values from other not found on this data array: @@ -1404,29 +1405,33 @@ def interp( ) -> "DataArray": """ Multidimensional interpolation of variables. + Parameters + ---------- coords : dict, optional Mapping from dimension names to the new coordinates. new coordinate can be an scalar, array-like or DataArray. If DataArrays are passed as new coordates, their dimensions are used for the broadcasting. - method: {'linear', 'nearest'} for multidimensional array, - {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} - for 1-dimensional array. - assume_sorted: boolean, optional + method : str, default: "linear" + The method used to interpolate. Choose from + + - {"linear", "nearest"} for multidimensional array, + - {"linear", "nearest", "zero", "slinear", "quadratic", "cubic"} for 1-dimensional array. + assume_sorted : bool, optional If False, values of x can be in any order and they are sorted first. If True, x has to be an array of monotonically increasing values. - kwargs: dictionary + kwargs : dict Additional keyword arguments passed to scipy's interpolator. Valid options and their behavior depend on if 1-dimensional or multi-dimensional interpolation is used. - ``**coords_kwargs`` : {dim: coordinate, ...}, optional + **coords_kwargs : {dim: coordinate, ...}, optional The keyword arguments form of ``coords``. One of coords or coords_kwargs must be provided. Returns ------- - interpolated: xr.DataArray + interpolated : DataArray New dataarray on the new coordinates. Notes @@ -1477,21 +1482,22 @@ def interp_like( Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. - method: string, optional. - {'linear', 'nearest'} for multidimensional array, - {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} - for 1-dimensional array. 'linear' is used by default. - assume_sorted: boolean, optional + method : str, default: "linear" + The method used to interpolate. 
Choose from + + - {"linear", "nearest"} for multidimensional array, + - {"linear", "nearest", "zero", "slinear", "quadratic", "cubic"} for 1-dimensional array. + assume_sorted : bool, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. - kwargs: dictionary, optional + kwargs : dict, optional Additional keyword passed to scipy's interpolator. Returns ------- - interpolated: xr.DataArray + interpolated : DataArray Another dataarray by interpolating this dataarray's data along the coordinates of the other object. @@ -1529,7 +1535,7 @@ def rename( If the argument is dict-like, it used as a mapping from old names to new names for coordinates. Otherwise, use the argument as the new name for this array. - **names: hashable, optional + **names : hashable, optional The keyword arguments form of a mapping from old names to new names for coordinates. One of new_name_or_name_dict or names must be provided. @@ -1622,20 +1628,20 @@ def expand_dims( Parameters ---------- - dim : hashable, sequence of hashable, dict, or None + dim : hashable, sequence of hashable, dict, or None, optional Dimensions to include on the new variable. If provided as str or sequence of str, then dimensions are inserted with length 1. If provided as a dict, then the keys are the new dimensions and the values are either integers (giving the length of the new dimensions) or sequence/ndarray (giving the coordinates of the new dimensions). - axis : integer, list (or tuple) of integers, or None + axis : int, list of int or tuple of int, or None, default: None Axis position(s) where new axis is to be inserted (position(s) on the result array). If a list (or tuple) of integers is passed, multiple axes are inserted. In this case, dim arguments should be same length list. If axis=None is passed, all the axes will be inserted to the start of the result array. - **dim_kwargs : int or sequence/ndarray + **dim_kwargs : int or sequence or ndarray The keywords are arbitrary dimensions being inserted and the values are either the lengths of the new dims (if int is given), or their coordinates. Note, this is an alternative to passing a dict to the @@ -1678,7 +1684,7 @@ def set_index( append : bool, optional If True, append the supplied index(es) to the existing index(es). Otherwise replace the existing index(es) (default). - **indexes_kwargs: optional + **indexes_kwargs : optional The keyword arguments form of ``indexes``. One of indexes or indexes_kwargs must be provided. @@ -1729,7 +1735,7 @@ def reset_index( Parameters ---------- - dims_or_levels : hashable or sequence of hashables + dims_or_levels : hashable or sequence of hashable Name(s) of the dimension(s) and/or multi-index level(s) that will be reset. drop : bool, optional @@ -1766,7 +1772,7 @@ def reorder_levels( Mapping from names matching dimensions and values given by lists representing new level orders. Every given dimension must have a multi-index. - **dim_order_kwargs: optional + **dim_order_kwargs : optional The keyword arguments form of ``dim_order``. One of dim_order or dim_order_kwargs must be provided. @@ -1802,12 +1808,13 @@ def stack( Parameters ---------- - dimensions : Mapping of the form new_name=(dim1, dim2, ...) + dimensions : mapping of hashable to sequence of hashable + Mapping of the form `new_name=(dim1, dim2, ...)`. Names of new dimensions, and the existing dimensions that they replace. 
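As a small worked example of ``expand_dims`` with a dict argument (the names and lengths are hypothetical):

.. code:: python

    import xarray as xr

    da = xr.DataArray([1.0, 2.0], dims="x")

    # broadcast onto a new leading dimension "y" of length 3 -> shape (3, 2)
    expanded = da.expand_dims({"y": 3})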
An ellipsis (`...`) will be replaced by all unlisted dimensions. Passing a list containing an ellipsis (`stacked_dim=[...]`) will stack over all dimensions. - **dimensions_kwargs: + **dimensions_kwargs The keyword arguments form of ``dimensions``. One of dimensions or dimensions_kwargs must be provided. @@ -1860,8 +1867,10 @@ def unstack( dim : hashable or sequence of hashable, optional Dimension(s) over which to unstack. By default unstacks all MultiIndexes. - fill_value: value to be filled. By default, np.nan - sparse: use sparse-array if True + fill_value : scalar, default: nan + value to be filled. + sparse : bool, default: False + use sparse-array if True Returns ------- @@ -1911,7 +1920,7 @@ def to_unstacked_dataset(self, dim, level=0): level : int or str The MultiIndex level to expand to a dataset along. Can either be the integer index of the level or its name. - label : int, default 0 + label : int, default: 0 Label of the level to expand dataset along. Overrides the label argument if given. @@ -1974,7 +1983,7 @@ def transpose(self, *dims: Hashable, transpose_coords: bool = True) -> "DataArra *dims : hashable, optional By default, reverse the dimensions. Otherwise, reorder the dimensions to this order. - transpose_coords : boolean, default True + transpose_coords : bool, default: True If True, also transpose the coordinates of this DataArray. Returns @@ -2016,9 +2025,9 @@ def drop_vars( Parameters ---------- - names : hashable or iterable of hashables + names : hashable or iterable of hashable Name(s) of variables to drop. - errors: {'raise', 'ignore'}, optional + errors: {"raise", "ignore"}, optional If 'raise' (default), raises a ValueError error if any of the variable passed are not in the dataset. If 'ignore', any given names that are in the DataArray are dropped and no error is raised. @@ -2062,9 +2071,9 @@ def drop_sel( Parameters ---------- - labels : Mapping[Hashable, Any] + labels : mapping of hashable to Any Index labels to drop - errors: {'raise', 'ignore'}, optional + errors : {"raise", "ignore"}, optional If 'raise' (default), raises a ValueError error if any of the index labels passed are not in the dataset. If 'ignore', any given labels that are in the @@ -2093,10 +2102,10 @@ def dropna( dim : hashable Dimension along which to drop missing values. Dropping along multiple dimensions simultaneously is not yet supported. - how : {'any', 'all'}, optional + how : {"any", "all"}, optional * any : if any NA values are present, drop that label * all : if all values are NA, drop that label - thresh : int, default None + thresh : int, default: None If supplied, require this many non-NA values. Returns @@ -2163,18 +2172,18 @@ def interpolate_na( - 'barycentric', 'krog', 'pchip', 'spline', 'akima': use their respective :py:class:`scipy.interpolate` classes. - use_coordinate : bool, str, default True + use_coordinate : bool or str, default: True Specifies which index to use as the x values in the interpolation formulated as `y = f(x)`. If False, values are treated as if eqaully-spaced along ``dim``. If True, the IndexVariable `dim` is used. If ``use_coordinate`` is a string, it specifies the name of a coordinate variariable to use as the index. - limit : int, default None + limit : int, default: None Maximum number of consecutive NaNs to fill. Must be greater than 0 or None for no limit. This filling is done regardless of the size of the gap in the data. To only interpolate over gaps less than a given length, see ``max_gap``. 
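The stack/unstack round trip described above, as a minimal sketch with hypothetical dimensions:

.. code:: python

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.arange(6).reshape(2, 3), dims=("x", "y"))

    stacked = da.stack(z=("x", "y"))  # "z" is a MultiIndex dimension
    roundtrip = stacked.unstack("z")  # recovers the original "x" and "y"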
- max_gap: int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default None. + max_gap: int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default: None Maximum size of gap, a continuous sequence of NaNs, that will be filled. Use None for no limit. When interpolating along a datetime64 dimension and ``use_coordinate=True``, ``max_gap`` can be one of the following: @@ -2197,7 +2206,7 @@ def interpolate_na( * x (x) int64 0 1 2 3 4 5 6 7 8 The gap lengths are 3-0 = 3; 6-3 = 3; and 8-6 = 2 respectively - keep_attrs : bool, default True + keep_attrs : bool, default: True If True, the dataarray's attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. @@ -2237,7 +2246,7 @@ def ffill(self, dim: Hashable, limit: int = None) -> "DataArray": dim : hashable Specifies the dimension along which to propagate values when filling. - limit : int, default None + limit : int, default: None The maximum number of consecutive NaN values to forward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater @@ -2261,7 +2270,7 @@ def bfill(self, dim: Hashable, limit: int = None) -> "DataArray": dim : str Specifies the dimension along which to propagate values when filling. - limit : int, default None + limit : int, default: None The maximum number of consecutive NaN values to backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater @@ -2306,11 +2315,11 @@ def reduce( Parameters ---------- - func : function + func : callable Function which can be called in the form `f(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. - dim : hashable or sequence of hashables, optional + dim : hashable or sequence of hashable, optional Dimension(s) over which to apply `func`. axis : int or sequence of int, optional Axis(es) over which to repeatedly apply `func`. Only one of the @@ -2321,7 +2330,7 @@ def reduce( If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. - keepdims : bool, default False + keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one. Coordinates that use these dimensions are removed. @@ -2435,8 +2444,8 @@ def to_masked_array(self, copy: bool = True) -> np.ma.MaskedArray: Parameters ---------- - copy : bool - If True (default) make a copy of the array in the result. If False, + copy : bool, default: True + If True make a copy of the array in the result. If False, a MaskedArray view of DataArray.values is returned. Returns @@ -2507,23 +2516,27 @@ def from_dict(cls, d: dict) -> "DataArray": """ Convert a dictionary into an xarray.DataArray - Input dict can take several forms:: + Input dict can take several forms: - d = {'dims': ('t'), 'data': x} + .. 
code:: python - d = {'coords': {'t': {'dims': 't', 'data': t, - 'attrs': {'units':'s'}}}, - 'attrs': {'title': 'air temperature'}, - 'dims': 't', - 'data': x, - 'name': 'a'} + d = {"dims": ("t"), "data": x} - where 't' is the name of the dimesion, 'a' is the name of the array, + d = { + "coords": {"t": {"dims": "t", "data": t, "attrs": {"units": "s"}}}, + "attrs": {"title": "air temperature"}, + "dims": "t", + "data": x, + "name": "a", + } + + where "t" is the name of the dimension, "a" is the name of the array, and x and t are lists, numpy.arrays, or pandas objects. Parameters ---------- - d : dict, with a minimum structure of {'dims': [..], 'data': [..]} + d : dict + Mapping with a minimum structure of {"dims": [...], "data": [...]} Returns ------- @@ -2660,7 +2673,7 @@ def identical(self, other: "DataArray") -> bool: See Also -------- DataArray.broadcast_equals - DataArray.equal + DataArray.equals """ try: return self.name == other.name and self._all_compat(other, "identical") @@ -2766,7 +2779,7 @@ def _title_for_slice(self, truncate: int = 50) -> str: Parameters ---------- - truncate : integer + truncate : int, default: 50 maximum number of characters for title Returns ------- @@ -2849,13 +2862,13 @@ def shift( Parameters ---------- - shifts : Mapping with the form of {dim: offset} + shifts : mapping of hashable to int, optional Integer offset to shift along each of the given dimensions. Positive offsets shift to the right; negative offsets shift to the left. fill_value: scalar, optional Value to use for newly missing values - **shifts_kwargs: + **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. @@ -2898,7 +2911,7 @@ def roll( Parameters ---------- - shifts : Mapping with the form of {dim: offset} + shifts : mapping of hashable to int, optional Integer offset to rotate each of the given dimensions. Positive offsets roll to the right; negative offsets roll to the left. @@ -2907,7 +2920,8 @@ def roll( The current default of roll_coords (None, equivalent to True) is deprecated and will change to False in a future version. Explicitly pass roll_coords to silence the warning. - **shifts_kwargs : The keyword arguments form of ``shifts``. + **shifts_kwargs + The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns @@ -2953,8 +2967,8 @@ def dot( ---------- other : DataArray The other array with which the dot product is performed. - dims: '...', hashable or sequence of hashables, optional - Which dimensions to sum over. Ellipsis ('...') sums over all dimensions. + dims : ..., hashable or sequence of hashable, optional + Which dimensions to sum over. Ellipsis (`...`) sums over all dimensions. If not specified, then all the common dimensions are summed over. Returns @@ -3018,15 +3032,15 @@ def sortby( Parameters ---------- - variables: hashable, DataArray, or sequence of either + variables : hashable, DataArray, or sequence of hashable or DataArray 1D DataArray objects or name(s) of 1D variable(s) in coords whose values are used to sort this array. - ascending: boolean, optional + ascending : bool, optional Whether to sort by ascending or descending order. Returns ------- - sorted: DataArray + sorted : DataArray A new dataarray where all the specified dims are sorted by dim labels. @@ -3067,11 +3081,11 @@ def quantile( Parameters ---------- - q : float in range of [0,1] or array-like of floats + q : float or array-like of float Quantile to compute, which must be between 0 and 1 inclusive.
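A minimal sketch of the simplest dict form accepted by ``from_dict`` (the values are hypothetical):

.. code:: python

    import xarray as xr

    d = {"dims": "t", "data": [10, 20, 30], "name": "a"}
    da = xr.DataArray.from_dict(d)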
dim : hashable or sequence of hashable, optional Dimension(s) over which to apply quantile. - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: @@ -3200,12 +3214,12 @@ def differentiate( Parameters ---------- - coord: hashable + coord : hashable The coordinate to be used to compute the gradient. - edge_order: 1 or 2. Default 1 + edge_order : {1, 2}, default: 1 N-th order accurate differences at the boundaries. - datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', - 'us', 'ns', 'ps', 'fs', 'as'} + datetime_unit : {"Y", "M", "W", "D", "h", "m", "s", "ms", \ + "us", "ns", "ps", "fs", "as"} or None, optional Unit to compute gradient. Only valid for datetime coordinate. Returns @@ -3258,12 +3272,11 @@ def integrate( Parameters ---------- - dim: hashable, or a sequence of hashable + dim : hashable, or sequence of hashable Coordinate(s) used for the integration. - datetime_unit: str, optional + datetime_unit : {"Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", \ + "ps", "fs", "as"}, optional Can be used to specify the unit if datetime coordinate is used. - One of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', - 'fs', 'as'} Returns ------- @@ -3330,7 +3343,7 @@ def map_blocks( Parameters ---------- - func: callable + func : callable User-provided function that accepts a DataArray as its first parameter. The function will receive a subset or 'block' of this DataArray (see below), corresponding to one chunk along each chunked dimension. ``func`` will be @@ -3339,14 +3352,13 @@ def map_blocks( This function must return either a single DataArray or a single Dataset. This function cannot add a new chunked dimension. - - args: Sequence + args : sequence Passed to func after unpacking and subsetting any xarray objects by blocks. xarray objects in args must be aligned with this object, otherwise an error is raised. - kwargs: Mapping + kwargs : mapping Passed verbatim to func after unpacking. xarray objects, if any, will not be subset to blocks. Passing dask collections in kwargs is not allowed. - template: (optional) DataArray, Dataset + template : DataArray or Dataset, optional xarray object representing the final result after compute is called. If not provided, the function will be first run on mocked-up data, that looks like this object but has sizes 0, to determine properties of the returned object such as dtype, @@ -3448,13 +3460,13 @@ def polyfit( invalid values, False otherwise. rcond : float, optional Relative condition number to the fit. - w : Union[Hashable, Any], optional + w : hashable or array-like, optional Weights to apply to the y-coordinate of the sample points. Can be an array-like object or the name of a coordinate in the dataset. full : bool, optional Whether to return the residuals, matrix rank and singular values in addition to the coefficients. - cov : Union[bool, str], optional + cov : bool or str, optional Whether to return to the covariance matrix in addition to the coefficients. The matrix is not scaled if `cov='unscaled'`. @@ -3510,10 +3522,11 @@ def pad( Parameters ---------- - pad_width : Mapping with the form of {dim: (pad_before, pad_after)} - Number of values padded along each dimension. 
+ pad_width : mapping of hashable to tuple of int + Mapping with the form of {dim: (pad_before, pad_after)} + describing the number of values padded along each dimension. {dim: pad} is a shortcut for pad_before = pad_after = pad - mode : str + mode : str, default: "constant" One of the following string values (taken from numpy docs) 'constant' (default) @@ -3546,7 +3559,7 @@ def pad( Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning. - stat_length : int, tuple or mapping of the form {dim: tuple} + stat_length : int, tuple or mapping of hashable to tuple, default: None Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. {dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)} unique @@ -3556,7 +3569,7 @@ def pad( (stat_length,) or int is a shortcut for before = after = statistic length for all axes. Default is ``None``, to use the entire axis. - constant_values : scalar, tuple or mapping of the form {dim: tuple} + constant_values : scalar, tuple or mapping of hashable to tuple, default: 0 Used in 'constant'. The values to set the padded values for each axis. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique @@ -3566,7 +3579,7 @@ def pad( ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all dimensions. Default is 0. - end_values : scalar, tuple or mapping of the form {dim: tuple} + end_values : scalar, tuple or mapping of hashable to tuple, default: 0 Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique @@ -3576,12 +3589,12 @@ def pad( ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all axes. Default is 0. - reflect_type : {'even', 'odd'}, optional - Used in 'reflect', and 'symmetric'. The 'even' style is the + reflect_type : {"even", "odd"}, optional + Used in "reflect", and "symmetric". The "even" style is the default with an unaltered reflection around the edge value. For - the 'odd' style, the extended part of the array is created by + the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. - **pad_width_kwargs: + **pad_width_kwargs The keyword arguments form of ``pad_width``. One of ``pad_width`` or ``pad_width_kwargs`` must be provided. @@ -3672,18 +3685,18 @@ def idxmin( dim : str, optional Dimension over which to apply `idxmin`. This is optional for 1D arrays, but required for arrays with 2 or more dimensions. - skipna : bool or None, default None + skipna : bool or None, default: None If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). - fill_value : Any, default NaN + fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. - keep_attrs : bool, default False + keep_attrs : bool, default: False If True, the attributes (``attrs``) will be copied from the original object to the new one. 
If False (default), the new object will be returned without attributes. @@ -3766,21 +3779,21 @@ def idxmax( Parameters ---------- - dim : str, optional + dim : hashable, optional Dimension over which to apply `idxmax`. This is optional for 1D arrays, but required for arrays with 2 or more dimensions. - skipna : bool or None, default None + skipna : bool or None, default: None If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). - fill_value : Any, default NaN + fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. - keep_attrs : bool, default False + keep_attrs : bool, default: False If True, the attributes (``attrs``) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 55b4f6040b1..99e3b1e7581 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -1396,7 +1396,7 @@ def set_coords( Parameters ---------- - names : hashable or iterable of hashables + names : hashable or iterable of hashable Name(s) of variables in this dataset to convert into coordinates. Returns @@ -1431,7 +1431,7 @@ def reset_coords( Parameters ---------- - names : hashable or iterable of hashables, optional + names : hashable or iterable of hashable, optional Name(s) of non-index coordinates in this dataset to reset into variables. By default, all non-index coordinates are reset. drop : bool, optional @@ -1488,18 +1488,18 @@ def to_netcdf( Parameters ---------- - path : str, Path or file-like object, optional + path : str, Path or file-like, optional Path to which to save this dataset. File-like objects are only supported by the scipy engine. If no path is provided, this function returns the resulting netCDF file as bytes; in this case, we need to use scipy, which does not support netCDF version 4 (the default format becomes NETCDF3_64BIT). - mode : {'w', 'a'}, optional + mode : {"w", "a"}, default: "w" Write ('w') or append ('a') mode. If mode='w', any existing file at this location will be overwritten. If mode='a', existing variables will be overwritten. - format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', - 'NETCDF3_CLASSIC'}, optional + format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \ + "NETCDF3_CLASSIC"}, optional File format for the resulting netCDF file: * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API @@ -1522,19 +1522,19 @@ def to_netcdf( group : str, optional Path to the netCDF4 group in the given file to open (only works for format='NETCDF4'). The group(s) will be created if necessary. - engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional + engine : {"netcdf4", "scipy", "h5netcdf"}, optional Engine to use when writing netCDF files. If not provided, the default engine is chosen based on available dependencies, with a preference for 'netcdf4' if writing to a file on disk. 
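For instance, ``idxmax`` returns the coordinate label at which the maximum occurs rather than the integer position; a sketch with hypothetical values:

.. code:: python

    import xarray as xr

    da = xr.DataArray([3, 1, 4, 1], dims="x", coords={"x": [10, 20, 30, 40]})

    da.idxmax()  # -> 30, the "x" label of the maximum value 4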
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
- ``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,
- 'zlib': True}, ...}``
+ ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1,
+ "zlib": True}, ...}``
The `h5netcdf` engine supports both the NetCDF4-style compression
- encoding parameters ``{'zlib': True, 'complevel': 9}`` and the h5py
- ones ``{'compression': 'gzip', 'compression_opts': 9}``.
+ encoding parameters ``{"zlib": True, "complevel": 9}`` and the h5py
+ ones ``{"compression": "gzip", "compression_opts": 9}``.
This allows using any compression plugin installed in the HDF5
library, e.g. LZF.
@@ -1542,14 +1542,14 @@ def to_netcdf(
Dimension(s) that should be serialized as unlimited dimensions.
By default, no dimensions are treated as unlimited dimensions.
Note that unlimited_dims may also be set via
- ``dataset.encoding['unlimited_dims']``.
- compute: boolean
+ ``dataset.encoding["unlimited_dims"]``.
+ compute : bool, default: True
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
- invalid_netcdf: boolean
- Only valid along with engine='h5netcdf'. If True, allow writing
+ invalid_netcdf : bool, default: False
+ Only valid along with ``engine="h5netcdf"``. If True, allow writing
hdf5 files which are invalid netcdf as described in
- https://github.com/shoyer/h5netcdf. Default: False.
+ https://github.com/shoyer/h5netcdf.
"""
if encoding is None:
encoding = {}
@@ -1589,12 +1589,12 @@ def to_zarr(
----------
store : MutableMapping, str or Path, optional
Store or path to directory in file system.
- mode : {'w', 'w-', 'a', None}
- Persistence mode: 'w' means create (overwrite if exists);
- 'w-' means create (fail if exists);
- 'a' means override existing variables (create if does not exist).
+ mode : {"w", "w-", "a", None}, optional
+ Persistence mode: "w" means create (overwrite if exists);
+ "w-" means create (fail if exists);
+ "a" means override existing variables (create if does not exist).
If ``append_dim`` is set, ``mode`` can be omitted as it is
- internally set to ``'a'``. Otherwise, ``mode`` will default to
+ internally set to ``"a"``. Otherwise, ``mode`` will default to
`w-` if not set.
synchronizer : object, optional
Array synchronizer
@@ -1603,7 +1603,7 @@ def to_zarr(
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
- ``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,}, ...}``
+ ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1,}, ...}``
compute: bool, optional
If True compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
@@ -1672,7 +1672,8 @@ def info(self, buf=None) -> None:
Parameters
----------
- buf : writable buffer, defaults to sys.stdout
+ buf : file-like, default: sys.stdout
+ writable buffer
See Also
--------
@@ -1743,7 +1744,7 @@ def chunk(
----------
chunks : int, 'auto' or mapping, optional
Chunk sizes along each dimension, e.g., ``5`` or
- ``{'x': 5, 'y': 5}``.
+ ``{"x": 5, "y": 5}``.
name_prefix : str, optional
Prefix for the name of any new dask arrays.
token : str, optional
@@ -1920,7 +1921,7 @@ def isel(
drop : bool, optional
If ``drop=True``, drop coordinates variables indexed by integers
instead of making them scalar.
- missing_dims : {"raise", "warn", "ignore"}, default "raise"
+ missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not
present in the Dataset:
- "raise": raise an exception
@@ -2062,7 +2063,7 @@ def sel(
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
One of indexers or indexers_kwargs must be provided.
- method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
+ method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional
Method to use for inexact matches:

* None (default): only exact matches
@@ -2215,7 +2216,7 @@ def thin(
A dict with keys matching dimensions and integer values `n`
or a single integer `n` applied over all dimensions.
One of indexers or indexers_kwargs must be provided.
- ``**indexers_kwargs`` : {dim: n, ...}, optional
+ **indexers_kwargs : {dim: n, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
@@ -2295,7 +2296,7 @@ def reindex_like(
other object need not be the same as the indexes on this
dataset. Any mis-matched index values will be filled in with
NaN, and any mis-matched dimension names will simply be ignored.
- method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
+ method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional
Method to use for filling index values from other not found in this
dataset:

@@ -2349,13 +2350,13 @@ def reindex(
Parameters
----------
- indexers : dict. optional
+ indexers : dict, optional
Dictionary with keys given by dimension names and values given by
arrays of coordinates tick labels. Any mis-matched coordinate
values will be filled in with NaN, and any mis-matched dimension
names will simply be ignored.
One of indexers or indexers_kwargs must be provided.
- method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
+ method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional
Method to use for filling index values in ``indexers`` not found in
this dataset:

@@ -2374,7 +2375,8 @@ def reindex(
the input. In either case, a new xarray object is always returned.
fill_value : scalar, optional
Value to use for newly missing values
- sparse: use sparse-array. By default, False
+ sparse : bool, default: False
+ If True, use a sparse array.
**indexers_kwargs : {dim: indexer, ...}, optional
Keyword arguments in the same form as ``indexers``.
One of indexers or indexers_kwargs must be provided.
@@ -2584,16 +2586,16 @@ def interp(
New coordinate can be a scalar, array-like or DataArray.
If DataArrays are passed as new coordates, their dimensions are
used for the broadcasting.
- method: string, optional.
- {'linear', 'nearest'} for multidimensional array,
- {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
- for 1-dimensional array. 'linear' is used by default.
- assume_sorted: boolean, optional
+ method : str, optional
+ {"linear", "nearest"} for multidimensional array,
+ {"linear", "nearest", "zero", "slinear", "quadratic", "cubic"}
+ for 1-dimensional array. "linear" is used by default.
+ assume_sorted : bool, optional
If False, values of coordinates that are interpolated over can be
in any order and they are sorted first. If True, interpolated
coordinates are assumed to be an array of monotonically increasing
values.
- kwargs: dictionary, optional
+ kwargs : dict, optional
Additional keyword arguments passed to scipy's interpolator.
Valid options and their behavior depend on if 1-dimensional or
multi-dimensional interpolation is used.
@@ -2603,7 +2605,7 @@ def interp(
Returns
-------
- interpolated: xr.Dataset
+ interpolated : Dataset
New dataset on the new coordinates.

Notes
@@ -2712,21 +2714,21 @@ def interp_like(
Object with an 'indexes' attribute giving a mapping from dimension
names to an 1d array-like, which provides coordinates upon
which to index the variables in this dataset.
- method: string, optional.
- {'linear', 'nearest'} for multidimensional array,
- {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
+ method : str, optional
+ {"linear", "nearest"} for multidimensional array,
+ {"linear", "nearest", "zero", "slinear", "quadratic", "cubic"}
for 1-dimensional array. 'linear' is used by default.
- assume_sorted: boolean, optional
+ assume_sorted : bool, optional
If False, values of coordinates that are interpolated over can be
in any order and they are sorted first. If True, interpolated
coordinates are assumed to be an array of monotonically increasing
values.
- kwargs: dictionary, optional
+ kwargs : dict, optional
Additional keyword passed to scipy's interpolator.

Returns
-------
- interpolated: xr.Dataset
+ interpolated : Dataset
Another dataset by interpolating this dataset's data along the
coordinates of the other object.
@@ -2813,7 +2815,7 @@ def rename(
name_dict : dict-like, optional
Dictionary whose keys are current variable or dimension names and
whose values are the desired names.
- **names, optional
+ **names : optional
Keyword form of ``name_dict``. One of name_dict or names must be
provided.
@@ -2855,7 +2857,7 @@ def rename_dims(
Dictionary whose keys are current dimension names and
whose values are the desired names. The desired names must
not be the name of an existing dimension or Variable in the Dataset.
- **dims, optional
+ **dims : optional
Keyword form of ``dims_dict``. One of dims_dict or dims must be
provided.
@@ -2899,7 +2901,7 @@ def rename_vars(
name_dict : dict-like, optional
Dictionary whose keys are current variable or coordinate names and
whose values are the desired names.
- **names, optional
+ **names : optional
Keyword form of ``name_dict``. One of name_dict or names must be
provided.
@@ -3049,13 +3051,13 @@ def expand_dims(
and the values are either integers (giving the length of the new
dimensions) or array-like (giving the coordinates of the new
dimensions).
- axis : integer, sequence of integers, or None
+ axis : int, sequence of int, or None
Axis position(s) where new axis is to be inserted (position(s) on
the result array). If a list (or tuple) of integers is passed,
multiple axes are inserted. In this case, dim arguments should be
same length list. If axis=None is passed, all the axes will be
inserted to the start of the result array.
- **dim_kwargs : int or sequence/ndarray
+ **dim_kwargs : int or sequence or ndarray
The keywords are arbitrary dimensions being inserted and the
values are either the lengths of the new dims (if int is given),
or their coordinates. Note, this is an alternative to passing a dict to the
@@ -3178,7 +3180,7 @@ def set_index(
append : bool, optional
If True, append the supplied index(es) to the existing index(es).
Otherwise replace the existing index(es) (default).
- **indexes_kwargs: optional
+ **indexes_kwargs : optional
The keyword arguments form of ``indexes``.
One of indexes or indexes_kwargs must be provided.
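For reference, the keyword-argument forms documented in the hunks above can be
exercised as follows. This is a minimal sketch using only the public xarray
API; the dataset contents and names are illustrative, not taken from the
changed docstrings:

.. code:: python

    import numpy as np
    import xarray as xr

    ds = xr.Dataset(
        {"a": ("x", np.arange(3))},
        coords={"x": [10, 20, 30], "y": ("x", ["p", "q", "r"])},
    )

    # keyword form of expand_dims: insert a new dimension "time" of length 1
    expanded = ds.expand_dims(time=1)

    # keyword form of set_index: use the "y" coordinate as the index of dim "x"
    indexed = ds.set_index(x="y")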
@@ -3275,7 +3277,7 @@ def reorder_levels(
Mapping from names matching dimensions and values given
by lists representing new level orders. Every given dimension
must have a multi-index.
- **dim_order_kwargs: optional
+ **dim_order_kwargs : optional
The keyword arguments form of ``dim_order``.
One of dim_order or dim_order_kwargs must be provided.
@@ -3343,12 +3345,13 @@ def stack(
Parameters
----------
- dimensions : Mapping of the form new_name=(dim1, dim2, ...)
- Names of new dimensions, and the existing dimensions that they
- replace. An ellipsis (`...`) will be replaced by all unlisted dimensions.
+ dimensions : mapping of hashable to sequence of hashable
+ Mapping of the form `new_name=(dim1, dim2, ...)`. Names of new
+ dimensions, and the existing dimensions that they replace. An
+ ellipsis (`...`) will be replaced by all unlisted dimensions.
Passing a list containing an ellipsis (`stacked_dim=[...]`) will stack over
all dimensions.
- **dimensions_kwargs:
+ **dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
@@ -3382,9 +3385,9 @@ def to_stacked_array(
Parameters
----------
- new_dim : Hashable
+ new_dim : hashable
Name of the new stacked coordinate
- sample_dims : Sequence[Hashable]
+ sample_dims : sequence of hashable
Dimensions that **will not** be stacked. Each array in the dataset
must share these dimensions. For machine learning applications,
these define the dimensions over which samples are drawn.
@@ -3538,11 +3541,13 @@ def unstack(
Parameters
----------
- dim : Hashable or iterable of Hashable, optional
+ dim : hashable or iterable of hashable, optional
Dimension(s) over which to unstack. By default unstacks all
MultiIndexes.
- fill_value: value to be filled. By default, np.nan
- sparse: use sparse-array if True
+ fill_value : scalar, default: nan
+ Value to be filled.
+ sparse : bool, default: False
+ Use sparse-array if True.

Returns
-------
@@ -3588,7 +3593,7 @@ def update(self, other: "CoercibleMapping", inplace: bool = None) -> "Dataset":
Parameters
----------
- other : Dataset or castable to Dataset
+ other : Dataset or mapping
Variables with which to update this dataset. One of:

- Dataset
@@ -3631,13 +3636,13 @@ def merge(
Parameters
----------
- other : Dataset or castable to Dataset
+ other : Dataset or mapping
Dataset or variables to merge with this dataset.
- overwrite_vars : Hashable or iterable of Hashable, optional
+ overwrite_vars : hashable or iterable of hashable, optional
If provided, update variables of these name(s) without checking for
conflicts in this dataset.
- compat : {'broadcast_equals', 'equals', 'identical',
- 'no_conflicts'}, optional
+ compat : {"broadcast_equals", "equals", "identical", \
+ "no_conflicts"}, optional
String indicating how to compare variables of the same name for
potential conflicts:

- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + join : {"outer", "inner", "left", "right", "exact"}, optional Method for joining ``self`` and ``other`` along shared dimensions: - 'outer': use the union of the indexes @@ -3658,7 +3663,7 @@ def merge( - 'left': use indexes from ``self`` - 'right': use indexes from ``other`` - 'exact': error instead of aligning non-equal indexes - fill_value: scalar, optional + fill_value : scalar, optional Value to use for newly missing values Returns @@ -3702,9 +3707,9 @@ def drop_vars( Parameters ---------- - names : hashable or iterable of hashables + names : hashable or iterable of hashable Name(s) of variables to drop. - errors: {'raise', 'ignore'}, optional + errors : {"raise", "ignore"}, optional If 'raise' (default), raises a ValueError error if any of the variable passed are not in the dataset. If 'ignore', any given names that are in the dataset are dropped and no error is raised. @@ -3783,9 +3788,9 @@ def drop_sel(self, labels=None, *, errors="raise", **labels_kwargs): Parameters ---------- - labels : Mapping[Hashable, Any] + labels : mapping of hashable to Any Index labels to drop - errors: {'raise', 'ignore'}, optional + errors : {"raise", "ignore"}, optional If 'raise' (default), raises a ValueError error if any of the index labels passed are not in the dataset. If 'ignore', any given labels that are in the @@ -3848,7 +3853,7 @@ def drop_dims( ---------- drop_dims : hashable or iterable of hashable Dimension or dimensions to drop. - errors: {'raise', 'ignore'}, optional + errors : {"raise", "ignore"}, optional If 'raise' (default), raises a ValueError error if any of the dimensions passed are not in the dataset. If 'ignore', any given labels that are in the dataset are dropped and no error is raised. @@ -3858,7 +3863,7 @@ def drop_dims( obj : Dataset The dataset without the given dimensions (or any variables containing those dimensions) - errors: {'raise', 'ignore'}, optional + errors : {"raise", "ignore"}, optional If 'raise' (default), raises a ValueError error if any of the dimensions passed are not in the dataset. If 'ignore', any given dimensions that are in the @@ -3890,7 +3895,7 @@ def transpose(self, *dims: Hashable) -> "Dataset": Parameters ---------- - *dims : Hashable, optional + *dims : hashable, optional By default, reverse the dimensions on each array. Otherwise, reorder the dimensions to this order. @@ -3935,13 +3940,13 @@ def dropna( Parameters ---------- - dim : Hashable + dim : hashable Dimension along which to drop missing values. Dropping along multiple dimensions simultaneously is not yet supported. - how : {'any', 'all'}, optional + how : {"any", "all"}, default: "any" * any : if any NA values are present, drop that label * all : if all values are NA, drop that label - thresh : int, default None + thresh : int, default: None If supplied, require this many non-NA values. subset : iterable of hashable, optional Which variables to check for missing values. By default, all @@ -4097,18 +4102,18 @@ def interpolate_na( - 'barycentric', 'krog', 'pchip', 'spline', 'akima': use their respective :py:class:`scipy.interpolate` classes. - use_coordinate : bool, str, default True + use_coordinate : bool, str, default: True Specifies which index to use as the x values in the interpolation formulated as `y = f(x)`. If False, values are treated as if eqaully-spaced along ``dim``. If True, the IndexVariable `dim` is used. 
If ``use_coordinate`` is a string, it specifies the name of a
coordinate variariable to use as the index.
- limit : int, default None
+ limit : int, default: None
Maximum number of consecutive NaNs to fill. Must be greater than 0
or None for no limit. This filling is done regardless of the size of
the gap in the data. To only interpolate over gaps less than a given length,
see ``max_gap``.
- max_gap: int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default None.
+ max_gap : int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default: None
Maximum size of gap, a continuous sequence of NaNs, that will be filled.
Use None for no limit. When interpolating along a datetime64 dimension
and ``use_coordinate=True``, ``max_gap`` can be one of the following:
@@ -4168,7 +4173,7 @@ def ffill(self, dim: Hashable, limit: int = None) -> "Dataset":
dim : Hashable
Specifies the dimension along which to propagate values when
filling.
- limit : int, default None
+ limit : int, default: None
The maximum number of consecutive NaN values to forward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
@@ -4193,7 +4198,7 @@ def bfill(self, dim: Hashable, limit: int = None) -> "Dataset":
dim : str
Specifies the dimension along which to propagate values when
filling.
- limit : int, default None
+ limit : int, default: None
The maximum number of consecutive NaN values to backward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
@@ -4252,7 +4257,7 @@ def reduce(
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
- keepdims : bool, default False
+ keepdims : bool, default: False
If True, the dimensions which are reduced are left in the result
as dimensions of size one. Coordinates that use these dimensions
are removed.
@@ -4406,12 +4411,12 @@ def assign(
Parameters
----------
- variables : mapping, value pairs
+ variables : mapping of hashable to Any
Mapping from variables names to the new values. If the new values
are callable, they are computed on the Dataset and assigned to new
data variables. If the values are not callable, (e.g. a DataArray,
scalar, or array), they are simply assigned.
- **variables_kwargs:
+ **variables_kwargs
The keyword arguments form of ``variables``.
One of variables or variables_kwargs must be provided.
@@ -4673,9 +4678,9 @@ def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> "Datas
Parameters
----------
- dataframe : pandas.DataFrame
+ dataframe : DataFrame
DataFrame from which to copy data and indices.
- sparse : bool
+ sparse : bool, default: False
If true, create a sparse arrays instead of dense numpy arrays. This
can potentially save a large amount of memory if the DataFrame has
a MultiIndex. Requires the sparse package (sparse.pydata.org).
@@ -4836,27 +4841,35 @@ def from_dict(cls, d):
"""
Convert a dictionary into an xarray.Dataset.
- Input dict can take several forms::
+ Input dict can take several forms:
+
+ .. code:: python

- d = {'t': {'dims': ('t'), 'data': t},
- 'a': {'dims': ('t'), 'data': x},
- 'b': {'dims': ('t'), 'data': y}}
+ d = {
+ "t": {"dims": ("t"), "data": t},
+ "a": {"dims": ("t"), "data": x},
+ "b": {"dims": ("t"), "data": y},
+ }

- d = {'coords': {'t': {'dims': 't', 'data': t,
- 'attrs': {'units':'s'}}},
- 'attrs': {'title': 'air temperature'},
- 'dims': 't',
- 'data_vars': {'a': {'dims': 't', 'data': x, },
- 'b': {'dims': 't', 'data': y}}}
+ d = {
+ "coords": {"t": {"dims": "t", "data": t, "attrs": {"units": "s"}}},
+ "attrs": {"title": "air temperature"},
+ "dims": "t",
+ "data_vars": {
+ "a": {"dims": "t", "data": x,},
+ "b": {"dims": "t", "data": y},
+ },
+ }

- where 't' is the name of the dimesion, 'a' and 'b' are names of data
+ where "t" is the name of the dimension, "a" and "b" are names of data
variables and t, x, and y are lists, numpy.arrays or pandas objects.

Parameters
----------
- d : dict, with a minimum structure of {'var_0': {'dims': [..], \
- 'data': [..]}, \
- ...}
+ d : dict-like
+ Mapping with a minimum structure of
+ ``{"var_0": {"dims": [..], "data": [..]}, \
+ ...}``

Returns
-------
@@ -5100,13 +5113,13 @@ def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):
Parameters
----------
- shifts : Mapping with the form of {dim: offset}
+ shifts : mapping of hashable to int
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
- fill_value: scalar, optional
+ fill_value : scalar, optional
Value to use for newly missing values
- **shifts_kwargs:
+ **shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
@@ -5249,15 +5262,15 @@ def sortby(self, variables, ascending=True):
Parameters
----------
- variables: str, DataArray, or list of either
+ variables : str, DataArray, or list of str or DataArray
1D DataArray objects or name(s) of 1D variable(s) in
coords/data_vars whose values are used to sort the dataset.
- ascending: boolean, optional
+ ascending : bool, optional
Whether to sort by ascending or descending order.

Returns
-------
- sorted: Dataset
+ sorted : Dataset
A new dataset where all the specified dims are sorted by dim
labels.
"""
@@ -5300,11 +5313,11 @@ def quantile(
Parameters
----------
- q : float in range of [0,1] or array-like of floats
+ q : float or array-like of float
Quantile to compute, which must be between 0 and 1 inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
- interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+ interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
@@ -5483,12 +5496,12 @@ def differentiate(self, coord, edge_order=1, datetime_unit=None):
Parameters
----------
- coord: str
+ coord : str
The coordinate to be used to compute the gradient.
- edge_order: 1 or 2. Default 1
+ edge_order : {1, 2}, default: 1
N-th order accurate differences at the boundaries.
- datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
- 'us', 'ns', 'ps', 'fs', 'as'}
+ datetime_unit : None or {"Y", "M", "W", "D", "h", "m", "s", "ms", \
+ "us", "ns", "ps", "fs", "as"}, default: None
Unit to compute gradient. Only valid for datetime coordinate.
Returns
@@ -5541,16 +5554,15 @@ def integrate(self, coord, datetime_unit=None):
Parameters
----------
- coord: str, or a sequence of str
+ coord : str or sequence of str
Coordinate(s) used for the integration.
- datetime_unit
- Can be specify the unit if datetime coordinate is used. One of
- {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs',
- 'as'}
+ datetime_unit : {"Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", \
+ "ps", "fs", "as"}, optional
+ Specify the unit if a datetime coordinate is used.

Returns
-------
- integrated: Dataset
+ integrated : Dataset

See also
--------
@@ -5662,7 +5674,7 @@ def filter_by_attrs(self, **kwargs):
Parameters
----------
- **kwargs : key=value
+ **kwargs
key : str
Attribute name.
value : callable or obj
@@ -5806,7 +5818,7 @@ def map_blocks(
Parameters
----------
- func: callable
+ func : callable
User-provided function that accepts a Dataset as its first
parameter. The function will receive a subset or 'block' of this Dataset (see below),
corresponding to one chunk along each chunked dimension. ``func`` will be
executed as ``func(subset_dataset, *subset_args, **kwargs)``.

This function must return either a single DataArray or a single Dataset.

This function cannot add a new chunked dimension.
-
- args: Sequence
+ args : sequence
Passed to func after unpacking and subsetting any xarray objects by blocks.
xarray objects in args must be aligned with obj, otherwise an error is raised.
- kwargs: Mapping
+ kwargs : mapping
Passed verbatim to func after unpacking. xarray objects, if any, will not be
subset to blocks. Passing dask collections in kwargs is not allowed.
- template: (optional) DataArray, Dataset
+ template : DataArray or Dataset, optional
xarray object representing the final result after compute is called. If not provided,
the function will be first run on mocked-up data, that looks like this object but
has sizes 0, to determine properties of the returned object such as dtype,
@@ -5926,13 +5937,13 @@ def polyfit(
invalid values, False otherwise.
rcond : float, optional
Relative condition number to the fit.
- w : Union[Hashable, Any], optional
+ w : hashable or Any, optional
Weights to apply to the y-coordinate of the sample points.
Can be an array-like object or the name of a coordinate in the dataset.
full : bool, optional
Whether to return the residuals, matrix rank and singular values in addition
to the coefficients.
- cov : Union[bool, str], optional
+ cov : bool or str, optional
Whether to return to the covariance matrix in addition to the coefficients.
The matrix is not scaled if `cov='unscaled'`.
@@ -6104,10 +6115,11 @@ def pad(
Parameters
----------
- pad_width : Mapping with the form of {dim: (pad_before, pad_after)}
- Number of values padded along each dimension.
+ pad_width : mapping of hashable to tuple of int
+ Mapping with the form of {dim: (pad_before, pad_after)}
+ describing the number of values padded along each dimension.
{dim: pad} is a shortcut for pad_before = pad_after = pad
- mode : str
+ mode : str, default: "constant"
One of the following string values (taken from numpy docs).

'constant' (default)
@@ -6140,7 +6152,7 @@ def pad(
Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the
end values are used to pad the beginning.
- stat_length : int, tuple or mapping of the form {dim: tuple}
+ stat_length : int, tuple or mapping of hashable to tuple, default: None
Used in 'maximum', 'mean', 'median', and 'minimum'.
Number of values at edge of each axis used to calculate the statistic value. {dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)} unique @@ -6150,7 +6162,7 @@ def pad( (stat_length,) or int is a shortcut for before = after = statistic length for all axes. Default is ``None``, to use the entire axis. - constant_values : scalar, tuple or mapping of the form {dim: tuple} + constant_values : scalar, tuple or mapping of hashable to tuple, default: 0 Used in 'constant'. The values to set the padded values for each axis. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique @@ -6160,7 +6172,7 @@ def pad( ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all dimensions. Default is 0. - end_values : scalar, tuple or mapping of the form {dim: tuple} + end_values : scalar, tuple or mapping of hashable to tuple, default: 0 Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique @@ -6170,12 +6182,12 @@ def pad( ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all axes. Default is 0. - reflect_type : {'even', 'odd'}, optional - Used in 'reflect', and 'symmetric'. The 'even' style is the + reflect_type : {"even", "odd"}, optional + Used in "reflect", and "symmetric". The "even" style is the default with an unaltered reflection around the edge value. For - the 'odd' style, the extended part of the array is created by + the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. - **pad_width_kwargs: + **pad_width_kwargs The keyword arguments form of ``pad_width``. One of ``pad_width`` or ``pad_width_kwargs`` must be provided. @@ -6263,18 +6275,18 @@ def idxmin( dim : str, optional Dimension over which to apply `idxmin`. This is optional for 1D variables, but required for variables with 2 or more dimensions. - skipna : bool or None, default None + skipna : bool or None, default: None If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). - fill_value : Any, default NaN + fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. - keep_attrs : bool, default False + keep_attrs : bool, default: False If True, the attributes (``attrs``) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. @@ -6361,18 +6373,18 @@ def idxmax( dim : str, optional Dimension over which to apply `idxmax`. This is optional for 1D variables, but required for variables with 2 or more dimensions. - skipna : bool or None, default None + skipna : bool or None, default: None If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). 
- fill_value : Any, default NaN
+ fill_value : Any, default: NaN
Value to be filled in case all of the values along a dimension are
null. By default this is NaN. The fill value and result are
automatically converted to a compatible dtype if possible.
Ignored if ``skipna`` is False.
- keep_attrs : bool, default False
+ keep_attrs : bool, default: False
If True, the attributes (``attrs``) will be copied from the
original object to the new one. If False (default), the new object
will be returned without attributes.
diff --git a/xarray/core/groupby.py b/xarray/core/groupby.py
index 5087390ecc0..8fb343a97bf 100644
--- a/xarray/core/groupby.py
+++ b/xarray/core/groupby.py
@@ -43,7 +43,7 @@ def unique_value_groups(ar, sort=True):
----------
ar : array-like
Input array. This will be flattened if it is not already 1-D.
- sort : boolean, optional
+ sort : bool, optional
Whether or not to sort unique values.

Returns
@@ -128,7 +128,7 @@ def _inverse_permutation_indices(positions):
Parameters
----------
- positions : list of np.ndarray or slice objects.
+ positions : list of ndarray or slice
If slice objects, all are assumed to be slices.

Returns
@@ -283,16 +283,16 @@ def __init__(
Object to group.
group : DataArray
Array with the group values.
- squeeze : boolean, optional
+ squeeze : bool, optional
If "group" is a coordinate of object, `squeeze` controls whether
the subarrays have a dimension of length 1 along that coordinate or
if the dimension is squeezed out.
- grouper : pd.Grouper, optional
+ grouper : pandas.Grouper, optional
Used for grouping values along the `group` array.
bins : array-like, optional
If `bins` is specified, the groups will be discretized into the
specified bins by `pandas.cut`.
- restore_coord_dims : bool, default True
+ restore_coord_dims : bool, default: True
If True, also restore the dimension order of multi-dimensional
coordinates.
cut_kwargs : dict, optional
@@ -533,8 +533,10 @@ def fillna(self, value):
Parameters
----------
- value : valid type for the grouped object's fillna method
- Used to fill all matching missing values by group.
+ value
+ Used to fill all matching missing values by group. Needs
+ to be of a valid type for the wrapped object's fillna
+ method.

Returns
-------
@@ -556,13 +558,13 @@ def quantile(
Parameters
----------
- q : float in range of [0,1] (or sequence of floats)
+ q : float or sequence of float
Quantile to compute, which must be between 0 and 1
inclusive.
- dim : `...`, str or sequence of str, optional
+ dim : ..., str or sequence of str, optional
Dimension(s) over which to apply quantile.
Defaults to the grouped dimension.
- interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+ interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
@@ -660,8 +662,8 @@ def where(self, cond, other=dtypes.NA):
Parameters
----------
- cond : DataArray or Dataset with boolean dtype
- Locations at which to preserve this objects values.
+ cond : DataArray or Dataset
+ Locations at which to preserve this object's values. dtypes have to be `bool`.
other : scalar, DataArray or Dataset, optional
Value to use for locations in this object where ``cond`` is False.
By default, inserts missing values.
@@ -769,7 +771,7 @@ def map(self, func, shortcut=False, args=(), **kwargs):
Parameters
----------
- func : function
+ func : callable
Callable to apply to each array.
shortcut : bool, optional Whether or not to shortcut evaluation under the assumptions that: @@ -783,9 +785,9 @@ def map(self, func, shortcut=False, args=(), **kwargs): If these conditions are satisfied `shortcut` provides significant speedup. This should be the case for many common groupby operations (e.g., applying numpy ufuncs). - ``*args`` : tuple, optional + *args : tuple, optional Positional arguments passed to `func`. - ``**kwargs`` + **kwargs Used to call `func(ar, **kwargs)` for each array `ar`. Returns @@ -847,11 +849,11 @@ def reduce( Parameters ---------- - func : function + func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. - dim : `...`, str or sequence of str, optional + dim : ..., str or sequence of str, optional Dimension(s) over which to apply `func`. axis : int or sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dimension' @@ -907,7 +909,7 @@ def map(self, func, args=(), shortcut=None, **kwargs): Parameters ---------- - func : function + func : callable Callable to apply to each sub-dataset. args : tuple, optional Positional arguments to pass to `func`. @@ -958,11 +960,11 @@ def reduce(self, func, dim=None, keep_attrs=None, **kwargs): Parameters ---------- - func : function + func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. - dim : `...`, str or sequence of str, optional + dim : ..., str or sequence of str, optional Dimension(s) over which to apply `func`. axis : int or sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dimension' diff --git a/xarray/core/merge.py b/xarray/core/merge.py index 35b77d700a0..2a837295472 100644 --- a/xarray/core/merge.py +++ b/xarray/core/merge.py @@ -90,12 +90,12 @@ def unique_variable( ---------- name : hashable Name for this variable. - variables : list of xarray.Variable + variables : list of Variable List of Variable objects, all of which go by the same name in different inputs. - compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional + compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional Type of equality check to use. - equals: None or bool, + equals : None or bool, optional corresponding to result of compat test Returns @@ -170,7 +170,9 @@ def merge_collected( Parameters ---------- - + grouped : mapping + prioritized : mapping + compat : str Type of equality check to use when checking for conflicts. Returns @@ -335,7 +337,7 @@ def determine_coords( Parameters ---------- - list_of_mappings : list of dict or Dataset objects + list_of_mappings : list of dict or list of Dataset Of the same form as the arguments to expand_variable_dicts. Returns @@ -371,7 +373,7 @@ def coerce_pandas_values(objects: Iterable["CoercibleMapping"]) -> List["Dataset Parameters ---------- - objects : list of Dataset or mappings + objects : list of Dataset or mapping The mappings may contain any sort of objects coercible to xarray.Variables as keys, including pandas objects. @@ -410,11 +412,11 @@ def _get_priority_vars_and_indexes( Parameters ---------- - objects : list of dictionaries of variables + objects : list of dict-like of Variable Dictionaries in which to find the priority variables. priority_arg : int or None Integer object whose variable should take priority. 
- compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional + compat : {"identical", "equals", "broadcast_equals", "no_conflicts"}, optional Compatibility checks to use when merging variables. Returns @@ -550,15 +552,15 @@ def merge_core( Parameters ---------- - objects : list of mappings + objects : list of mapping All values must be convertable to labeled arrays. - compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional + compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional Compatibility checks to use when merging variables. - join : {'outer', 'inner', 'left', 'right'}, optional + join : {"outer", "inner", "left", "right"}, optional How to combine objects with different indexes. - combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, optional + combine_attrs : {"drop", "identical", "no_conflicts", "override"}, optional How to combine attributes of objects - priority_arg : integer, optional + priority_arg : int, optional Optional argument in `objects` that takes precedence over the others. explicit_coords : set, optional An explicit list of variables from `objects` that are coordinates. @@ -636,45 +638,45 @@ def merge( Parameters ---------- - objects : Iterable[Union[xarray.Dataset, xarray.DataArray, dict]] + objects : iterable of Dataset or iterable of DataArray or iterable of dict-like Merge together all variables from these objects. If any of them are DataArray objects, they must have a name. - compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional + compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional String indicating how to compare variables of the same name for potential conflicts: - - 'broadcast_equals': all values must be equal when variables are + - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - - 'equals': all values and dimensions must be the same. - - 'identical': all values, dimensions and attributes must be the + - "equals": all values and dimensions must be the same. + - "identical": all values, dimensions and attributes must be the same. - - 'no_conflicts': only values which are not null in both datasets + - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - - 'override': skip comparing and pick variable from first dataset - join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + - "override": skip comparing and pick variable from first dataset + join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes in objects. 
- - 'outer': use the union of object indexes - - 'inner': use the intersection of object indexes - - 'left': use indexes from the first object with each dimension - - 'right': use indexes from the last object with each dimension - - 'exact': instead of aligning, raise `ValueError` when indexes to be + - "outer": use the union of object indexes + - "inner": use the intersection of object indexes + - "left": use indexes from the first object with each dimension + - "right": use indexes from the last object with each dimension + - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - - 'override': if indexes are of same size, rewrite indexes to be + - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. fill_value : scalar, optional Value to use for newly missing values - combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, - default 'drop' + combine_attrs : {"drop", "identical", "no_conflicts", "override"}, \ + default: "drop" String indicating how to combine attrs of the objects being merged: - - 'drop': empty attrs on returned Dataset. - - 'identical': all attrs must be the same on every object. - - 'no_conflicts': attrs from all objects are combined, any that have + - "drop": empty attrs on returned Dataset. + - "identical": all attrs must be the same on every object. + - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - - 'override': skip comparing and copy attrs from the first dataset to + - "override": skip comparing and copy attrs from the first dataset to the result. Returns diff --git a/xarray/core/ops.py b/xarray/core/ops.py index 3675317977f..9dd9ee24ccd 100644 --- a/xarray/core/ops.py +++ b/xarray/core/ops.py @@ -114,7 +114,7 @@ implemented (object, datetime64 or timedelta64).""" _MINCOUNT_DOCSTRING = """ -min_count : int, default None +min_count : int, default: None The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. New in version 0.10.8: Added with the default being None.""" @@ -140,22 +140,22 @@ def fillna(data, other, join="left", dataset_join="left"): Parameters ---------- - join : {'outer', 'inner', 'left', 'right'}, optional + join : {"outer", "inner", "left", "right"}, optional Method for joining the indexes of the passed objects along each dimension - - 'outer': use the union of object indexes - - 'inner': use the intersection of object indexes - - 'left': use indexes from the first object with each dimension - - 'right': use indexes from the last object with each dimension - - 'exact': raise `ValueError` instead of aligning when indexes to be + - "outer": use the union of object indexes + - "inner": use the intersection of object indexes + - "left": use indexes from the first object with each dimension + - "right": use indexes from the last object with each dimension + - "exact": raise `ValueError` instead of aligning when indexes to be aligned are not equal - dataset_join : {'outer', 'inner', 'left', 'right'}, optional + dataset_join : {"outer", "inner", "left", "right"}, optional Method for joining variables of Dataset objects with mismatched data variables. 
- - 'outer': take variables from both Dataset objects - - 'inner': take only overlapped variables - - 'left': take only variables from the first object - - 'right': take only variables from the last object + - "outer": take variables from both Dataset objects + - "inner": take only overlapped variables + - "left": take only variables from the first object + - "right": take only variables from the last object """ from .computation import apply_ufunc diff --git a/xarray/core/parallel.py b/xarray/core/parallel.py index 6d5456f77f7..74a02015ce5 100644 --- a/xarray/core/parallel.py +++ b/xarray/core/parallel.py @@ -176,7 +176,7 @@ def map_blocks( Parameters ---------- - func: callable + func : callable User-provided function that accepts a DataArray or Dataset as its first parameter ``obj``. The function will receive a subset or 'block' of ``obj`` (see below), corresponding to one chunk along each chunked dimension. ``func`` will be @@ -186,15 +186,15 @@ def map_blocks( This function cannot add a new chunked dimension. - obj: DataArray, Dataset + obj : DataArray, Dataset Passed to the function as its first argument, one block at a time. - args: Sequence + args : sequence Passed to func after unpacking and subsetting any xarray objects by blocks. xarray objects in args must be aligned with obj, otherwise an error is raised. - kwargs: Mapping + kwargs : mapping Passed verbatim to func after unpacking. xarray objects, if any, will not be subset to blocks. Passing dask collections in kwargs is not allowed. - template: (optional) DataArray, Dataset + template : DataArray or Dataset, optional xarray object representing the final result after compute is called. If not provided, the function will be first run on mocked-up data, that looks like ``obj`` but has sizes 0, to determine properties of the returned object such as dtype, diff --git a/xarray/core/resample.py b/xarray/core/resample.py index 2b3b7da6217..af9711a3cc3 100644 --- a/xarray/core/resample.py +++ b/xarray/core/resample.py @@ -29,8 +29,8 @@ def _upsample(self, method, *args, **kwargs): Parameters ---------- - method : str {'asfreq', 'pad', 'ffill', 'backfill', 'bfill', 'nearest', - 'interpolate'} + method : {"asfreq", "pad", "ffill", "backfill", "bfill", "nearest", \ + "interpolate"} Method to use for up-sampling See Also @@ -130,8 +130,8 @@ def interpolate(self, kind="linear"): Parameters ---------- - kind : str {'linear', 'nearest', 'zero', 'slinear', - 'quadratic', 'cubic'} + kind : {"linear", "nearest", "zero", "slinear", \ + "quadratic", "cubic"}, default: "linear" Interpolation scheme to use See Also @@ -193,7 +193,7 @@ def map(self, func, shortcut=False, args=(), **kwargs): Parameters ---------- - func : function + func : callable Callable to apply to each array. shortcut : bool, optional Whether or not to shortcut evaluation under the assumptions that: @@ -287,7 +287,7 @@ def map(self, func, args=(), shortcut=None, **kwargs): Parameters ---------- - func : function + func : callable Callable to apply to each sub-dataset. args : tuple, optional Positional arguments passed on to `func`. @@ -327,7 +327,7 @@ def reduce(self, func, dim=None, keep_attrs=None, **kwargs): Parameters ---------- - func : function + func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. 
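A minimal sketch of the resampling methods whose signatures are normalized in
the ``resample.py`` hunks above; the time axis and values here are purely
illustrative, and only documented public API calls are used:

.. code:: python

    import numpy as np
    import pandas as pd
    import xarray as xr

    times = pd.date_range("2020-01-01", periods=6, freq="D")
    da = xr.DataArray(np.arange(6.0), coords={"time": times}, dims="time")

    # downsampling reduces each bin, e.g. with mean()
    coarse = da.resample(time="2D").mean()

    # upsampling fills the new labels, e.g. with the documented
    # interpolate(kind=...) method
    fine = da.resample(time="12H").interpolate(kind="linear")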
diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py
index fb38c0c7fe6..a595f9db266 100644
--- a/xarray/core/rolling.py
+++ b/xarray/core/rolling.py
@@ -54,17 +54,14 @@ def __init__(self, obj, windows, min_periods=None, center=False, keep_attrs=None
----------
obj : Dataset or DataArray
Object to window.
- windows : A mapping from a dimension name to window size
- dim : str
- Name of the dimension to create the rolling iterator
- along (e.g., `time`).
- window : int
- Size of the moving window.
- min_periods : int, default None
+ windows : mapping of hashable to int
+ A mapping from the name of the dimension to create the rolling
+ window along (e.g. `time`) to the size of the moving window.
+ min_periods : int, default: None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
- center : boolean, default False
+ center : bool, default: False
Set the labels at the center of the window.
keep_attrs : bool, optional
If True, the object's attributes (`attrs`) will be copied from
@@ -174,17 +171,14 @@ def __init__(self, obj, windows, min_periods=None, center=False, keep_attrs=None
----------
obj : DataArray
Object to window.
- windows : A mapping from a dimension name to window size
- dim : str
- Name of the dimension to create the rolling iterator
- along (e.g., `time`).
- window : int
- Size of the moving window.
- min_periods : int, default None
+ windows : mapping of hashable to int
+ A mapping from the name of the dimension to create the rolling
+ window along (e.g. `time`) to the size of the moving window.
+ min_periods : int, default: None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
- center : boolean, default False
+ center : bool, default: False
Set the labels at the center of the window.
keep_attrs : bool, optional
If True, the object's attributes (`attrs`) will be copied from
@@ -234,12 +228,12 @@ def construct(
Parameters
----------
- window_dim: str or a mapping, optional
+ window_dim : str or mapping, optional
A mapping from dimension name to the new window dimension names.
Just a string can be used for 1d-rolling.
- stride: integer or a mapping, optional
+ stride : int or mapping of int, optional
Size of stride for the rolling window.
- fill_value: optional. Default dtypes.NA
+ fill_value : Any, default: dtypes.NA
Filling value to match the dimension size.
**window_dim_kwargs : {dim: new_name, ...}, optional
The keyword arguments form of ``window_dim``.
@@ -299,7 +293,7 @@ def reduce(self, func, **kwargs):
Parameters
----------
- func : function
+ func : callable
Function which can be called in the form
`func(x, **kwargs)` to return the result of collapsing an
np.ndarray over an the rolling dimension.
@@ -448,17 +442,14 @@ def __init__(self, obj, windows, min_periods=None, center=False, keep_attrs=None
----------
obj : Dataset
Object to window.
- windows : A mapping from a dimension name to window size
- dim : str
- Name of the dimension to create the rolling iterator
- along (e.g., `time`).
- window : int
- Size of the moving window.
- min_periods : int, default None
+ windows : mapping of hashable to int
+ A mapping from the name of the dimension to create the rolling
+ window along (e.g. `time`) to the size of the moving window.
+ min_periods : int, default: None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
- center : boolean, or a mapping from dimension name to boolean, default False
+ center : bool or mapping of hashable to bool, default: False
Set the labels at the center of the window.
keep_attrs : bool, optional
If True, the object's attributes (`attrs`) will be copied from
@@ -513,7 +504,7 @@ def reduce(self, func, **kwargs):
Parameters
----------
- func : function
+ func : callable
Function which can be called in the form
`func(x, **kwargs)` to return the result of collapsing an
np.ndarray over an the rolling dimension.
@@ -558,12 +549,12 @@ def construct(
Parameters
----------
- window_dim: str or a mapping, optional
+ window_dim : str or mapping, optional
A mapping from dimension name to the new window dimension names.
Just a string can be used for 1d-rolling.
- stride: integer, optional
+ stride : int, optional
size of stride for the rolling window.
- fill_value: optional. Default dtypes.NA
+ fill_value : Any, default: dtypes.NA
Filling value to match the dimension size.
**window_dim_kwargs : {dim: new_name, ...}, optional
The keyword arguments form of ``window_dim``.
@@ -635,12 +626,9 @@ def __init__(self, obj, windows, boundary, side, coord_func, keep_attrs):
----------
obj : Dataset or DataArray
Object to window.
- windows : A mapping from a dimension name to window size
- dim : str
- Name of the dimension to create the rolling iterator
- along (e.g., `time`).
- window : int
- Size of the moving window.
+ windows : mapping of hashable to int
+ A mapping from the name of the dimension to coarsen
+ (e.g. `time`) to the size of the window.
boundary : 'exact' | 'trim' | 'pad'
If 'exact', a ValueError will be raised if dimension size is not a
multiple of window size. If 'trim', the excess indexes are trimed.
diff --git a/xarray/core/rolling_exp.py b/xarray/core/rolling_exp.py
index 6ef63e42291..525867cc025 100644
--- a/xarray/core/rolling_exp.py
+++ b/xarray/core/rolling_exp.py
@@ -65,17 +65,13 @@ class RollingExp:
----------
obj : Dataset or DataArray
Object to window.
- windows : A single mapping from a single dimension name to window value
- dim : str
- Name of the dimension to create the rolling exponential window
- along (e.g., `time`).
- window : int
- Size of the moving window. The type of this is specified in
- `window_type`
- window_type : str, one of ['span', 'com', 'halflife', 'alpha'], default 'span'
+ windows : mapping of hashable to int
+ A mapping from the name of the dimension to create the rolling
+ exponential window along (e.g. `time`) to the size of the moving window.
+ window_type : {"span", "com", "halflife", "alpha"}, default: "span"
The format of the previously supplied window. Each is a simple
numerical transformation of the others. Described in detail:
- https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html
+ https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.ewm.html

Returns
-------
diff --git a/xarray/core/utils.py b/xarray/core/utils.py
index 668405ba574..ac060215848 100644
--- a/xarray/core/utils.py
+++ b/xarray/core/utils.py
@@ -116,7 +116,7 @@ def multiindex_from_product_levels(
----------
levels : sequence of pd.Index
Values for each MultiIndex level.
- names : optional sequence of objects
+ names : sequence of str, optional
Names for each level.
Returns
diff --git a/xarray/core/variable.py b/xarray/core/variable.py
index 1f86a40348c..a9567e80ce4 100644
--- a/xarray/core/variable.py
+++ b/xarray/core/variable.py
@@ -523,14 +523,14 @@ def _broadcast_indexes(self, key):
Parameters
-----------
- key: int, slice, array, dict or tuple of integer, slices and arrays
+ key : int, slice, array-like, dict or tuple of integer, slice and array-like
Any valid input for indexing.

Returns
-------
- dims: tuple
+ dims : tuple
Dimension of the resultant variable.
- indexers: IndexingTuple subclass
+ indexers : IndexingTuple subclass
Tuple of integer, array-like, or slices to use when indexing
self._data. The type of this argument indicates the type of
indexing to perform, either basic, outer or vectorized.
@@ -1053,7 +1053,7 @@ def isel(
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
- missing_dims : {"raise", "warn", "ignore"}, default "raise"
+ missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not
present in the DataArray:
- "raise": raise an exception
@@ -1146,7 +1146,7 @@ def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):
left.
fill_value: scalar, optional
Value to use for newly missing values
- **shifts_kwargs:
+ **shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
@@ -1194,26 +1194,27 @@ def pad(
Parameters
----------
- pad_width: Mapping with the form of {dim: (pad_before, pad_after)}
- Number of values padded along each dimension.
+ pad_width : mapping of hashable to tuple of int
+ Mapping with the form of {dim: (pad_before, pad_after)}
+ describing the number of values padded along each dimension.
{dim: pad} is a shortcut for pad_before = pad_after = pad
- mode: (str)
+ mode : str, default: "constant"
See numpy / Dask docs
- stat_length : int, tuple or mapping of the form {dim: tuple}
+ stat_length : int, tuple or mapping of hashable to tuple
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
- constant_values : scalar, tuple or mapping of the form {dim: tuple}
+ constant_values : scalar, tuple or mapping of hashable to tuple
Used in 'constant'. The values to set the padded values for each
axis.
- end_values : scalar, tuple or mapping of the form {dim: tuple}
+ end_values : scalar, tuple or mapping of hashable to tuple
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
- reflect_type : {'even', 'odd'}, optional
- Used in 'reflect', and 'symmetric'. The 'even' style is the
+ reflect_type : {"even", "odd"}, optional
+ Used in "reflect", and "symmetric". The "even" style is the
default with an unaltered reflection around the edge value. For
- the 'odd' style, the extended part of the array is created by
+ the "odd" style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
- **pad_width_kwargs:
+ **pad_width_kwargs
One of pad_width or pad_width_kwargs must be provided.

Returns
@@ -1298,11 +1299,11 @@ def roll(self, shifts=None, **shifts_kwargs):
Parameters
----------
- shifts : mapping of the form {dim: offset}
+ shifts : mapping of hashable to int
Integer offset to roll along each of the given dimensions.
Positive offsets roll to the right; negative offsets roll to the
left.
- **shifts_kwargs: + **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. @@ -1440,10 +1441,11 @@ def stack(self, dimensions=None, **dimensions_kwargs): Parameters ---------- - dimensions : Mapping of form new_name=(dim1, dim2, ...) - Names of new dimensions, and the existing dimensions that they - replace. - **dimensions_kwargs: + dimensions : mapping of hashable to tuple of hashable + Mapping of form new_name=(dim1, dim2, ...) describing the + names of new dimensions, and the existing dimensions that + they replace. + **dimensions_kwargs The keyword arguments form of ``dimensions``. One of dimensions or dimensions_kwargs must be provided. @@ -1500,10 +1502,11 @@ def unstack(self, dimensions=None, **dimensions_kwargs): Parameters ---------- - dimensions : mapping of the form old_dim={dim1: size1, ...} - Names of existing dimensions, and the new dimensions and sizes + dimensions : mapping of hashable to mapping of hashable to int + Mapping of the form old_dim={dim1: size1, ...} describing the + names of existing dimensions, and the new dimensions and sizes that they map to. - **dimensions_kwargs: + **dimensions_kwargs The keyword arguments form of ``dimensions``. One of dimensions or dimensions_kwargs must be provided. @@ -1542,7 +1545,7 @@ def reduce( Parameters ---------- - func : function + func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. @@ -1557,7 +1560,7 @@ def reduce( If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. - keepdims : bool, default False + keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one **kwargs : dict @@ -1627,7 +1630,7 @@ def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False): Parameters ---------- - variables : iterable of Array + variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. @@ -1637,7 +1640,7 @@ def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False): existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. - positions : None or list of integer arrays, optional + positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. @@ -1746,12 +1749,12 @@ def quantile( Parameters ---------- - q : float in range of [0,1] (or sequence of floats) + q : float or sequence of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or sequence of str, optional Dimension(s) over which to apply quantile. - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: @@ -1882,19 +1885,19 @@ def rolling_window( Parameters ---------- - dim: str + dim : str Dimension over which to compute rolling_window. For nd-rolling, should be list of dimensions. 
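Before the remaining ``rolling_window`` parameters, a compact round trip through the ``stack``/``unstack`` mappings documented above (dimension names and sizes are illustrative):

    import numpy as np
    import xarray as xr

    v = xr.Variable(("x", "y"), np.arange(6).reshape(2, 3))

    # stack: new_name=(dim1, dim2, ...) collapses x and y into z
    stacked = v.stack(z=("x", "y"))  # 1-D, length 6

    # unstack: old_dim={dim1: size1, ...} restores the original shape
    restored = stacked.unstack(z={"x": 2, "y": 3})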
- window: int + window : int Window size of the rolling window. For nd-rolling, should be a list of integers. - window_dim: str + window_dim : str New name of the window dimension. For nd-rolling, should be a list of strings. - center: boolean. default False. + center : bool, default: False If True, pad fill_value for both ends. Otherwise, pad in the head of the axis. - fill_value: + fill_value Value to be filled. Returns @@ -2537,7 +2540,7 @@ def concat(variables, dim="concat_dim", positions=None, shortcut=False): Parameters ---------- - variables : iterable of Array + variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. @@ -2547,7 +2550,7 @@ existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. - positions : None or list of integer arrays, optional + positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. diff --git a/xarray/plot/dataset_plot.py b/xarray/plot/dataset_plot.py index ea037c1a2c2..51ceff170cb 100644 --- a/xarray/plot/dataset_plot.py +++ b/xarray/plot/dataset_plot.py @@ -170,14 +170,14 @@ def _dsplot(plotfunc): ---------- ds : Dataset - x, y : string + x, y : str Variable names for x, y axis. hue: str, optional Variable by which to color scattered points hue_style: str, optional Can be either 'discrete' (legend) or 'continuous' (color bar). - markersize: str, optional (scatter only) - Variably by which to vary size of scattered points + markersize : str, optional + Scatter only. Variable by which to vary the size of scattered points. size_norm: optional Either None or 'Norm' instance to normalize the 'markersize' variable. add_guide: bool, optional @@ -185,13 +185,13 @@ - for "discrete", build a legend. This is the default for non-numeric `hue` variables. - for "continuous", build a colorbar - row : string, optional + row : str, optional If passed, make row faceted plots on this dimension name - col : string, optional + col : str, optional If passed, make column faceted plots on this dimension name - col_wrap : integer, optional + col_wrap : int, optional Use together with ``col`` to wrap faceted plots - ax : matplotlib axes, optional + ax : matplotlib axes object, optional If None, uses the current axis. Not applicable when using facets. subplot_kws : dict, optional Dictionary of keyword arguments for matplotlib subplots. Only applies @@ -205,21 +205,23 @@ def _dsplot(plotfunc): norm : ``matplotlib.colors.Normalize`` instance, optional If the ``norm`` has vmin or vmax specified, the corresponding kwarg must be None. - vmin, vmax : floats, optional + vmin, vmax : float, optional Values to anchor the colormap, otherwise they are inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting one of these values will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. - cmap : matplotlib colormap name or object, optional - The mapping from data values to color space.
If not provided, this - will be either be ``viridis`` (if the function infers a sequential - dataset) or ``RdBu_r`` (if the function infers a diverging dataset). - When `Seaborn` is installed, ``cmap`` may also be a `seaborn` - color palette. If ``cmap`` is seaborn color palette and the plot type - is not ``contour`` or ``contourf``, ``levels`` must also be specified. - colors : discrete colors to plot, optional + cmap : str or colormap, optional + The mapping from data values to color space. Either a + matplotlib colormap name or object. If not provided, this will + be either ``viridis`` (if the function infers a sequential + dataset) or ``RdBu_r`` (if the function infers a diverging + dataset). When `Seaborn` is installed, ``cmap`` may also be a + `seaborn` color palette. If ``cmap`` is seaborn color palette + and the plot type is not ``contour`` or ``contourf``, ``levels`` + must also be specified. + colors : color-like or list of color-like, optional A single color or a list of colors. If the plot type is not ``contour`` or ``contourf``, the ``levels`` argument is required. center : float, optional @@ -229,7 +231,7 @@ def _dsplot(plotfunc): robust : bool, optional If True and ``vmin`` or ``vmax`` are absent, the colormap range is computed with 2nd and 98th percentiles instead of the extreme values. - extend : {'neither', 'both', 'min', 'max'}, optional + extend : {"neither", "both", "min", "max"}, optional How to draw arrows extending the colorbar beyond its limits. If not provided, extend is inferred from vmin, vmax and the data limits. levels : int or list-like object, optional diff --git a/xarray/plot/plot.py b/xarray/plot/plot.py index be79f0ab04c..305405d4e5a 100644 --- a/xarray/plot/plot.py +++ b/xarray/plot/plot.py @@ -141,17 +141,17 @@ def plot( Parameters ---------- darray : DataArray - row : string, optional + row : str, optional If passed, make row faceted plots on this dimension name - col : string, optional + col : str, optional If passed, make column faceted plots on this dimension name - hue : string, optional + hue : str, optional If passed, make faceted line plots with hue on this dimension name - col_wrap : integer, optional + col_wrap : int, optional Use together with ``col`` to wrap faceted plots - ax : matplotlib axes, optional + ax : matplotlib.axes.Axes, optional If None, uses the current axis. Not applicable when using facets. - rtol : number, optional + rtol : float, optional Relative tolerance used to determine if the indexes are uniformly spaced. Usually a small positive number. subplot_kws : dict, optional @@ -265,9 +265,9 @@ def line( yincrease : None, True, or False, optional Should the values on the y axes be increasing from top to bottom? if None, use the default for the matplotlib function. - add_legend : boolean, optional + add_legend : bool, optional Add legend with y axis coordinates (2D inputs only). - ``*args``, ``**kwargs`` : optional + *args, **kwargs : optional Additional arguments to matplotlib.pyplot.plot """ # Handle facetgrids first @@ -337,23 +337,23 @@ def step(darray, *args, where="pre", drawstyle=None, ds=None, **kwargs): Parameters ---------- - where : {'pre', 'post', 'mid'}, optional, default 'pre' + where : {"pre", "post", "mid"}, default: "pre" Define where the steps should be placed: - - 'pre': The y value is continued constantly to the left from + - "pre": The y value is continued constantly to the left from every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the value ``y[i]``. 
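Stepping aside briefly before the remaining ``where`` options: the colormap keywords cleaned up above behave the same across the 2-D plot methods. A sketch on synthetic data (requires matplotlib; names and values are illustrative):

    import numpy as np
    import xarray as xr

    da2d = xr.DataArray(np.random.randn(4, 5), dims=("y", "x"))

    # robust=True anchors the colormap at the 2nd/98th percentiles
    da2d.plot(cmap="RdBu_r", center=0, robust=True)

    # explicit anchors; extend controls the colorbar arrowheads
    da2d.plot(vmin=-2, vmax=2, extend="both")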
- - 'post': The y value is continued constantly to the right from + - "post": The y value is continued constantly to the right from every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the value ``y[i]``. - - 'mid': Steps occur half-way between the *x* positions. + - "mid": Steps occur half-way between the *x* positions. Note that this parameter is ignored if one coordinate consists of :py:func:`pandas.Interval` values, e.g. as a result of :py:func:`xarray.Dataset.groupby_bins`. In this case, the actual boundaries of the interval are used. - ``*args``, ``**kwargs`` : optional + *args, **kwargs : optional Additional arguments following :py:func:`xarray.plot.line` """ if where not in {"pre", "post", "mid"}: @@ -407,7 +407,7 @@ def hist( size : scalar, optional If provided, create a new figure for the plot with the given size. Height (in inches) of each plot. See also: ``aspect``. - ax : matplotlib axes object, optional + ax : matplotlib.axes.Axes, optional Axis on which to plot this figure. By default, use the current axis. Mutually exclusive with ``size`` and ``figsize``. **kwargs : optional @@ -494,7 +494,7 @@ def _plot2d(plotfunc): If passed, make row faceted plots on this dimension name col : string, optional If passed, make column faceted plots on this dimension name - col_wrap : integer, optional + col_wrap : int, optional Use together with ``col`` to wrap faceted plots xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional Specifies scaling for the x- and y-axes respectively @@ -506,9 +506,9 @@ def _plot2d(plotfunc): yincrease : None, True, or False, optional Should the values on the y axes be increasing from top to bottom? if None, use the default for the matplotlib function. - add_colorbar : Boolean, optional + add_colorbar : bool, optional Adds colorbar to axis - add_labels : Boolean, optional + add_labels : bool, optional Use xarray metadata to label axes norm : ``matplotlib.colors.Normalize`` instance, optional If the ``norm`` has vmin or vmax specified, the corresponding kwarg @@ -537,7 +537,7 @@ def _plot2d(plotfunc): robust : bool, optional If True and ``vmin`` or ``vmax`` are absent, the colormap range is computed with 2nd and 98th percentiles instead of the extreme values. - extend : {'neither', 'both', 'min', 'max'}, optional + extend : {"neither", "both", "min", "max"}, optional How to draw arrows extending the colorbar beyond its limits. If not provided, extend is inferred from vmin, vmax and the data limits. levels : int or list-like object, optional @@ -720,9 +720,7 @@ def newplotfunc( if "imshow" == plotfunc.__name__ and isinstance(aspect, str): # forbid usage of mpl strings - raise ValueError( - "plt.imshow's `aspect` kwarg is not available " "in xarray" - ) + raise ValueError("plt.imshow's `aspect` kwarg is not available in xarray") if subplot_kws is None: subplot_kws = dict() @@ -753,7 +751,7 @@ def newplotfunc( elif cbar_ax is not None or cbar_kwargs: # inform the user about keywords which aren't used raise ValueError( - "cbar_ax and cbar_kwargs can't be used with " "add_colorbar=False." + "cbar_ax and cbar_kwargs can't be used with add_colorbar=False." 
) # origin kwarg overrides yincrease diff --git a/xarray/tests/test_duck_array_ops.py b/xarray/tests/test_duck_array_ops.py index 7d54aac36f8..b542dad998b 100644 --- a/xarray/tests/test_duck_array_ops.py +++ b/xarray/tests/test_duck_array_ops.py @@ -637,7 +637,7 @@ def test_docs(): skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). - min_count : int, default None + min_count : int, default: None The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. New in version 0.10.8: Added with the default being None. diff --git a/xarray/tutorial.py b/xarray/tutorial.py index d662f2fcaaf..63867cb5045 100644 --- a/xarray/tutorial.py +++ b/xarray/tutorial.py @@ -45,13 +45,13 @@ def open_dataset( Name of the file containing the dataset. If no suffix is given, assumed to be netCDF ('.nc' is appended) e.g. 'air_temperature' - cache_dir : string, optional + cache_dir : str, optional The directory in which to search for and write cached data. - cache : boolean, optional + cache : bool, optional If True, then cache data locally for use on subsequent calls - github_url : string + github_url : str Github repository where the data is stored - branch : string + branch : str The git branch to download from kws : dict, optional Passed to xarray.open_dataset
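Finally, the ``min_count`` behaviour quoted in the test docstring above, sketched on a toy array, plus the tutorial loader whose parameters close out this diff (the tutorial call downloads on first use, then reads from ``cache_dir``):

    import numpy as np
    import xarray as xr

    da = xr.DataArray([np.nan, np.nan, 1.0], dims="x")

    da.sum(min_count=1)  # -> 1.0: one valid value is enough
    da.sum(min_count=2)  # -> nan: fewer than two valid values present

    # cached locally after the first download
    ds = xr.tutorial.open_dataset("air_temperature")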