diff --git a/nomenclature/cli.py b/nomenclature/cli.py
index 1a0f45e5..5e7a19aa 100644
--- a/nomenclature/cli.py
+++ b/nomenclature/cli.py
@@ -1,5 +1,4 @@
 from pathlib import Path
-from typing import List, Optional
 import importlib.util
 import sys

@@ -55,10 +54,10 @@ def cli_valid_yaml(path: Path):
 def cli_valid_project(
     path: Path,
     definitions: str,
-    mappings: Optional[str],
-    required_data: Optional[str],
-    validate_data: Optional[str],
-    dimensions: Optional[List[str]],
+    mappings: str | None,
+    required_data: str | None,
+    validate_data: str | None,
+    dimensions: list[str] | None,
 ):
     """Assert that `path` is a valid project nomenclature

@@ -74,7 +73,7 @@ def cli_valid_project(
         Name of folder for 'required data' criteria, default to "required_data"
     validate_data: str, optional
         Name of folder for data validation criteria, default to "validate_data"
-    dimensions : List[str], optional
+    dimensions : list[str], optional
         Dimensions to be checked, defaults to all sub-folders of `definitions`

     Example
@@ -125,8 +124,8 @@ def check_region_aggregation(
     workflow_directory: Path,
     definitions: str,
     mappings: str,
-    processed_data: Optional[Path],
-    differences: Optional[Path],
+    processed_data: Path | None,
+    differences: Path | None,
 ):
     """Perform region processing and compare aggregated and original data

@@ -141,10 +140,10 @@ def check_region_aggregation(
         Definitions folder inside workflow_directory, by default "definitions"
     mappings : str
         Model mapping folder inside workflow_directory, by default "mappings"
-    processed_data : Optional[Path]
+    processed_data : Path, optional
         If given, exports the results from region processing to a file called
         `processed_data`, by default "results.xlsx"
-    differences : Optional[Path]
+    differences : Path, optional
         If given, exports the differences between aggregated and model native
         data to a file called `differences`, by default None

@@ -295,7 +294,7 @@ def cli_run_workflow(
     multiple=True,
     default=None,
 )
-def cli_validate_scenarios(input_file: Path, definitions: Path, dimensions: List[str]):
+def cli_validate_scenarios(input_file: Path, definitions: Path, dimensions: list[str]):
     """Validate a scenario file against the codelists of a project

     Example
@@ -312,7 +311,7 @@ def cli_validate_scenarios(input_file: Path, definitions: Path, dimensions: List
         Input data file, must be IAMC format, .xlsx or .csv
     definitions : Path
         Definitions folder with codelists, by default "definitions"
-    dimensions : List[str], optional
+    dimensions : list[str], optional
         Dimensions to be checked, defaults to all sub-folders of `definitions`

     Raises
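Note on the pattern used throughout this patch: `Optional[X]` and `Union[X, Y]` from `typing` are replaced by the PEP 604 spelling `X | None` / `X | Y`, and `List`/`Dict`/`Set`/`Tuple` by the PEP 585 built-in generics. A minimal sketch of the equivalence; runtime support for `X | None` needs Python >= 3.10, which this change assumes as the package's minimum version:

    from typing import List, Optional

    def old_style(dimensions: Optional[List[str]] = None) -> None: ...
    def new_style(dimensions: list[str] | None = None) -> None: ...

    # PEP 604 unions compare equal to their typing counterparts
    assert (str | None) == Optional[str]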
diff --git a/nomenclature/code.py b/nomenclature/code.py
index d92e3f9b..9c06bf36 100644
--- a/nomenclature/code.py
+++ b/nomenclature/code.py
@@ -2,7 +2,7 @@
 import re
 from keyword import iskeyword
 from pathlib import Path
-from typing import Any, Dict, List, Set, Union, Optional
+from typing import Any
 from pydantic import (
     field_validator,
     field_serializer,
@@ -24,8 +24,8 @@ class Code(BaseModel):

     name: str
     description: str | None = None
-    file: Union[str, Path] | None = None
-    extra_attributes: Dict[str, Any] = {}
+    file: str | Path | None = None
+    extra_attributes: dict[str, Any] = {}
     repository: str | None = None

     def __eq__(self, other) -> bool:
@@ -34,8 +34,8 @@ def __eq__(self, other) -> bool:
     @field_validator("extra_attributes")
     @classmethod
     def check_attribute_names(
-        cls, v: Dict[str, Any], info: ValidationInfo
-    ) -> Dict[str, Any]:
+        cls, v: dict[str, Any], info: ValidationInfo
+    ) -> dict[str, Any]:
         # Check that attributes only contains keys which are valid identifiers
         if illegal_keys := [
             key for key in v.keys() if not key.isidentifier() or iskeyword(key)
@@ -79,7 +79,7 @@ def from_dict(cls, mapping) -> "Code":
         )

     @classmethod
-    def named_attributes(cls) -> Set[str]:
+    def named_attributes(cls) -> set[str]:
         return {a for a in cls.model_fields if a != "extra_attributes"}

     @property
@@ -181,10 +181,10 @@ def __setattr__(self, name, value):


 class VariableCode(Code):
-    unit: Union[str, List[str]] = Field(...)
+    unit: str | list[str] = Field(...)
     tier: int | str | None = None
     weight: str | None = None
-    region_aggregation: List[Dict[str, Dict]] | None = Field(
+    region_aggregation: list[dict[str, dict]] | None = Field(
         default=None, alias="region-aggregation"
     )
     skip_region_aggregation: bool | None = Field(
@@ -192,7 +192,7 @@ class VariableCode(Code):
     )
     method: str | None = None
     check_aggregate: bool | None = Field(default=False, alias="check-aggregate")
-    components: Union[List[str], Dict[str, list[str]]] | None = None
+    components: list[str] | dict[str, list[str]] | None = None
     drop_negative_weights: bool | None = None

     model_config = ConfigDict(populate_by_name=True)
@@ -225,17 +225,17 @@ def convert_str_to_none_for_writing(self, v):
         return v if v != "" else None

     @property
-    def units(self) -> List[str]:
+    def units(self) -> list[str]:
         return self.unit if isinstance(self.unit, list) else [self.unit]

     @classmethod
-    def named_attributes(cls) -> Set[str]:
+    def named_attributes(cls) -> set[str]:
         return (
             super().named_attributes().union(f.alias for f in cls.model_fields.values())
         )

     @property
-    def pyam_agg_kwargs(self) -> Dict[str, Any]:
+    def pyam_agg_kwargs(self) -> dict[str, Any]:
         # return a dict of all not None pyam aggregation properties
         return {
             field: getattr(self, field)
@@ -249,7 +249,7 @@ def pyam_agg_kwargs(self) -> Dict[str, Any]:
         }

     @property
-    def agg_kwargs(self) -> Dict[str, Any]:
+    def agg_kwargs(self) -> dict[str, Any]:
         return (
             {**self.pyam_agg_kwargs, **{"region_aggregation": self.region_aggregation}}
             if self.region_aggregation is not None
@@ -274,11 +274,11 @@ class RegionCode(Code):
     """

     hierarchy: str = None
-    countries: Optional[List[str]] = None
-    iso3_codes: Optional[Union[List[str], str]] = None
+    countries: list[str] | None = None
+    iso3_codes: list[str] | str | None = None

     @field_validator("countries", mode="before")
-    def check_countries(cls, v: List[str], info: ValidationInfo) -> List[str]:
+    def check_countries(cls, v: list[str], info: ValidationInfo) -> list[str]:
         """Verifies that each country name is defined in `nomenclature.countries`."""
         v = to_list(v)
         if invalid_country_names := set(v) - set(countries.names):
@@ -291,7 +291,7 @@ def check_countries(cls, v: List[str], info: ValidationInfo) -> List[str]:
         return v

     @field_validator("iso3_codes")
-    def check_iso3_codes(cls, v: List[str], info: ValidationInfo) -> List[str]:
+    def check_iso3_codes(cls, v: list[str], info: ValidationInfo) -> list[str]:
         """Verifies that each ISO3 code is valid according to pycountry library."""
         errors = ErrorCollector()
         if invalid_iso3_codes := [
@@ -315,9 +315,9 @@ class MetaCode(Code):

     Attributes
     ----------
-    allowed_values : Optional(list[any])
+    allowed_values : list[Any], optional
         An optional list of allowed values
     """

-    allowed_values: List[Any] | None = None
+    allowed_values: list[Any] | None = None
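The `unit: str | list[str]` field above pairs with the `units` property, which normalizes either spelling to a list. A small usage sketch with hypothetical variable names and units:

    from nomenclature.code import VariableCode

    v = VariableCode(name="Primary Energy", unit="EJ/yr")
    assert v.units == ["EJ/yr"]  # a single unit string is wrapped in a list

    w = VariableCode(name="Price|Carbon", unit=["EUR/t CO2", "USD/t CO2"])
    assert w.units == ["EUR/t CO2", "USD/t CO2"]  # a list is passed through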
diff --git a/nomenclature/codelist.py b/nomenclature/codelist.py
index 1c177a0c..16adaa26 100644
--- a/nomenclature/codelist.py
+++ b/nomenclature/codelist.py
@@ -1,7 +1,7 @@
 import logging
 from pathlib import Path
 from textwrap import indent
-from typing import ClassVar, Dict, List
+from typing import ClassVar

 import numpy as np
 import pandas as pd
@@ -34,7 +34,7 @@ class CodeList(BaseModel):
     """

     name: str
-    mapping: Dict[str, Code] = {}
+    mapping: dict[str, Code] = {}

     # class variable
     validation_schema: ClassVar[str] = "generic"
@@ -46,8 +46,8 @@ def __eq__(self, other):
     @field_validator("mapping")
     @classmethod
     def check_stray_tag(
-        cls, v: Dict[str, Code], info: ValidationInfo
-    ) -> Dict[str, Code]:
+        cls, v: dict[str, Code], info: ValidationInfo
+    ) -> dict[str, Code]:
         """Check that no stray tags are left in codes after tag replacement"""
         forbidden = ["{", "}"]

@@ -75,8 +75,8 @@ def _check_string(value):
     @field_validator("mapping")
     @classmethod
     def check_end_whitespace(
-        cls, v: Dict[str, Code], info: ValidationInfo
-    ) -> Dict[str, Code]:
+        cls, v: dict[str, Code], info: ValidationInfo
+    ) -> dict[str, Code]:
         """Check that no code ends with a whitespace"""
         for code in v:
             if code.endswith(" "):
@@ -127,7 +127,7 @@ def validate_data(
             return False
         return True

-    def validate_items(self, items: List[str]) -> List[str]:
+    def validate_items(self, items: list[str]) -> list[str]:
         """Validate that a list of items are valid codes

         Returns
@@ -140,9 +140,9 @@ def validate_items(self, items: List[str]) -> List[str]:

     @classmethod
     def replace_tags(
-        cls, code_list: List[Code], tag_name: str, tags: List[Code]
-    ) -> List[Code]:
-        _code_list: List[Code] = []
+        cls, code_list: list[Code], tag_name: str, tags: list[Code]
+    ) -> list[Code]:
+        _code_list: list[Code] = []

         for code in code_list:
             if "{" + tag_name + "}" in code.name:
@@ -155,15 +155,15 @@ def replace_tags(
     @classmethod
     def _parse_and_replace_tags(
         cls,
-        code_list: List[Code],
+        code_list: list[Code],
         path: Path,
         file_glob_pattern: str = "**/*",
-    ) -> List[Code]:
+    ) -> list[Code]:
         """Cast, validate and replace tags into list of codes for one dimension

         Parameters
         ----------
-        code_list : List[Code]
+        code_list : list[Code]
             List of Code to modify
         path : :class:`pathlib.Path` or path-like
             Directory with the codelist files
@@ -173,10 +173,10 @@ def _parse_and_replace_tags(

         Returns
         -------
-        Dict[str, Code] :class: `nomenclature.Code`
+        dict[str, Code] :class: `nomenclature.Code`

         """
-        tag_dict: Dict[str, List[Code]] = {}
+        tag_dict: dict[str, list[Code]] = {}

         for yaml_file in (
             f
@@ -240,7 +240,7 @@ def from_directory(
                 )
                 code_list.extend(repo.filter_list_of_codes(repository_code_list))
         errors = ErrorCollector()
-        mapping: Dict[str, Code] = {}
+        mapping: dict[str, Code] = {}
         for code in code_list:
             if code.name in mapping:
                 errors.append(
@@ -301,7 +301,7 @@ def _parse_codelist_dir(
         file_glob_pattern: str = "**/*",
         repository: str | None = None,
     ):
-        code_list: List[Code] = []
+        code_list: list[Code] = []
         for yaml_file in (
             f
             for f in path.glob(file_glob_pattern)
@@ -457,7 +457,7 @@ def to_excel(
         with pd.ExcelWriter(excel_writer, **kwargs) as writer:
             write_sheet(writer, sheet_name, self.to_pandas(sort_by_code))

-    def codelist_repr(self, json_serialized=False) -> Dict:
+    def codelist_repr(self, json_serialized=False) -> dict:
         """Cast a CodeList into corresponding dictionary"""

         nice_dict = {}
@@ -590,7 +590,7 @@ def check_weight_in_vars(cls, v):
             )
         return v

-    def vars_default_args(self, variables: List[str]) -> List[VariableCode]:
+    def vars_default_args(self, variables: list[str]) -> list[VariableCode]:
        """return subset of variables which does not feature any special pyam
        aggregation arguments and where skip_region_aggregation is False"""
        return [
@@ -599,7 +599,7 @@ def vars_default_args(self, variables: List[str]) -> List[VariableCode]:
             if not self[var].agg_kwargs and not self[var].skip_region_aggregation
         ]

-    def vars_kwargs(self, variables: List[str]) -> List[VariableCode]:
+    def vars_kwargs(self, variables: list[str]) -> list[VariableCode]:
         # return subset of variables which features special pyam aggregation arguments
         # and where skip_region_aggregation is False
         return [
@@ -713,7 +713,7 @@ def from_directory(

         """

-        code_list: List[RegionCode] = []
+        code_list: list[RegionCode] = []

         # initializing from general configuration
         # adding all countries
@@ -763,7 +763,7 @@ def from_directory(
             )

         # translate to mapping
-        mapping: Dict[str, RegionCode] = {}
+        mapping: dict[str, RegionCode] = {}
         errors = ErrorCollector()

         for code in code_list:
@@ -783,12 +783,12 @@ def from_directory(
         return cls(name=name, mapping=mapping)

     @property
-    def hierarchy(self) -> List[str]:
+    def hierarchy(self) -> list[str]:
         """Return the hierarchies defined in the RegionCodeList

         Returns
         -------
-        List[str]
+        list[str]

         """
         return sorted(list({v.hierarchy for v in self.mapping.values()}))
@@ -799,9 +799,9 @@ def _parse_region_code_dir(
         path: Path,
         file_glob_pattern: str = "**/*",
         repository: str | None = None,
-    ) -> List[RegionCode]:
+    ) -> list[RegionCode]:
         """"""
-        code_list: List[RegionCode] = []
+        code_list: list[RegionCode] = []
         for yaml_file in (
             f
             for f in path.glob(file_glob_pattern)
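`validate_items` above now takes and returns plain `list[str]`. If I read the (truncated) docstring right, it returns the subset of items that are not defined in the codelist; a sketch with hypothetical codes:

    from nomenclature.code import Code
    from nomenclature.codelist import CodeList

    cl = CodeList(
        name="variable",
        mapping={"Primary Energy": Code(name="Primary Energy")},
    )
    invalid = cl.validate_items(["Primary Energy", "Final Energy"])
    print(invalid)  # expected: ["Final Energy"]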
diff --git a/nomenclature/core.py b/nomenclature/core.py
index 4da95f3d..8e38c547 100644
--- a/nomenclature/core.py
+++ b/nomenclature/core.py
@@ -1,5 +1,4 @@
 import logging
-from typing import Optional, Union, List

 import pyam
 from pydantic import validate_call
@@ -14,8 +13,8 @@
 def process(
     df: pyam.IamDataFrame,
     dsd: DataStructureDefinition,
-    dimensions: Optional[List[str]] = None,
-    processor: Optional[Union[Processor, List[Processor]]] = None,
+    dimensions: list[str] | None = None,
+    processor: Processor | list[Processor] | None = None,
 ) -> pyam.IamDataFrame:
     """Function for validation and region aggregation in one step
diff --git a/nomenclature/error.py b/nomenclature/error.py
index 6549a908..4cdca95f 100644
--- a/nomenclature/error.py
+++ b/nomenclature/error.py
@@ -1,7 +1,6 @@
 import logging
 import textwrap
 from collections import namedtuple
-from typing import Optional

 pydantic_custom_error_config = {
     "RegionNameCollisionError": (
@@ -49,7 +48,7 @@ class ErrorCollector:

     errors: list[Exception]
-    description: Optional[str] = None
+    description: str | None = None

     def __init__(self, description: str = None) -> None:
         self.errors = []
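`process()` is the one-step entry point whose signature is modernized above. A sketch of the documented workflow layout, assuming the package's top-level re-exports; the file and folder names here are hypothetical:

    import pyam
    from nomenclature import DataStructureDefinition, RegionProcessor, process

    df = pyam.IamDataFrame("scenario_data.xlsx")          # hypothetical input file
    dsd = DataStructureDefinition("definitions")          # codelists per dimension
    processor = RegionProcessor.from_directory("mappings", dsd)
    validated = process(df, dsd, processor=processor)     # validate, then aggregate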
diff --git a/nomenclature/processor/data_validator.py b/nomenclature/processor/data_validator.py
index c0731aac..6bd38df5 100644
--- a/nomenclature/processor/data_validator.py
+++ b/nomenclature/processor/data_validator.py
@@ -1,7 +1,6 @@
 import logging
 import textwrap
 from pathlib import Path
-from typing import List, Optional, Union

 import yaml
 from pyam import IamDataFrame
@@ -52,8 +51,8 @@ def criteria(self):


 class DataValidationCriteriaBounds(IamcDataFilter):
-    upper_bound: Optional[float] = None
-    lower_bound: Optional[float] = None
+    upper_bound: float | None = None
+    lower_bound: float | None = None

     @model_validator(mode="after")
     def check_validation_criteria_exist(self):
@@ -69,7 +68,7 @@ def validation_args(self):
 class DataValidator(Processor):
     """Processor for validating IAMC datapoints"""

-    criteria_items: List[DataValidationCriteriaBounds | DataValidationCriteriaValue]
+    criteria_items: list[DataValidationCriteriaBounds | DataValidationCriteriaValue]
     file: Path

     @field_validator("criteria_items", mode="before")
@@ -84,7 +83,7 @@ def check_criteria(cls, v):
         return v

     @classmethod
-    def from_file(cls, file: Union[Path, str]) -> "DataValidator":
+    def from_file(cls, file: Path | str) -> "DataValidator":
         with open(file, "r", encoding="utf-8") as f:
             content = yaml.safe_load(f)
         return cls(file=file, criteria_items=content)
diff --git a/nomenclature/processor/iamc.py b/nomenclature/processor/iamc.py
index 1eba7abc..3cba8d98 100644
--- a/nomenclature/processor/iamc.py
+++ b/nomenclature/processor/iamc.py
@@ -1,4 +1,3 @@
-from typing import List

 from pydantic import BaseModel, ConfigDict, field_validator
 from pyam import IAMC_IDX
@@ -9,12 +8,12 @@ class IamcDataFilter(BaseModel):

     model_config = ConfigDict(extra="forbid")

-    model: List[str] | None = None
-    scenario: List[str] | None = None
-    region: List[str] | None = None
-    variable: List[str] | None = None
-    unit: List[str] | None = None
-    year: List[int] | None = None
+    model: list[str] | None = None
+    scenario: list[str] | None = None
+    region: list[str] | None = None
+    variable: list[str] | None = None
+    unit: list[str] | None = None
+    year: list[int] | None = None

     @field_validator(*IAMC_IDX + ["year"], mode="before")
     @classmethod
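`DataValidationCriteriaBounds` extends `IamcDataFilter` with bounds, so an entry in a criteria file combines the filter fields above with `upper_bound`/`lower_bound`. A sketch with a hypothetical file and values, assuming the usual `Processor.apply` contract:

    from nomenclature.processor.data_validator import DataValidator

    # validate_data/criteria.yaml (hypothetical contents):
    #   - variable: Primary Energy
    #     year: [2030]
    #     lower_bound: 300
    #     upper_bound: 700
    validator = DataValidator.from_file("validate_data/criteria.yaml")
    checked = validator.apply(df)  # df: a pyam.IamDataFrame to be checked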
diff --git a/nomenclature/processor/region.py b/nomenclature/processor/region.py
index 92ac5248..06b56ec9 100644
--- a/nomenclature/processor/region.py
+++ b/nomenclature/processor/region.py
@@ -1,7 +1,6 @@
 import logging
 from collections import Counter
 from pathlib import Path
-from typing import Dict, List, Optional, Tuple, Union
 from typing_extensions import Annotated

 import numpy as np
@@ -42,12 +41,12 @@ class NativeRegion(BaseModel):
     ----------
     name : str
         Name of the model native region.
-    rename: Optional[str]
+    rename: str, optional
         Optional second name that the region will be renamed to.
     """

     name: str
-    rename: Optional[str] = None
+    rename: str | None = None

     @property
     def target_native_region(self) -> str:
@@ -74,7 +73,7 @@ class CommonRegion(BaseModel):
     """

     name: str
-    constituent_regions: List[str]
+    constituent_regions: list[str]

     @property
     def is_single_constituent_region(self):
@@ -102,17 +101,17 @@ class RegionAggregationMapping(BaseModel):
         Name of the model for which RegionAggregationMapping is defined.
     file: FilePath
         File path of the mapping file. Saved mostly for error reporting purposes.
-    native_regions: Optional[List[NativeRegion]]
+    native_regions: list[NativeRegion], optional
         Optionally, list of model native regions to select and potentially rename.
-    common_regions: Optional[List[CommonRegion]]
+    common_regions: list[CommonRegion], optional
         Optionally, list of common regions where aggregation will be performed.
     """

-    model: List[str]
+    model: list[str]
     file: FilePath
-    native_regions: List[NativeRegion] | None = None
-    common_regions: List[CommonRegion] | None = None
-    exclude_regions: List[str] | None = None
+    native_regions: list[NativeRegion] | None = None
+    common_regions: list[CommonRegion] | None = None
+    exclude_regions: list[str] | None = None

     @model_validator(mode="before")
     @classmethod
@@ -234,12 +233,12 @@ def check_exclude_common_region_overlap(
         return _check_exclude_region_overlap(v, "common_regions")

     @classmethod
-    def from_file(cls, file: Union[Path, str]):
+    def from_file(cls, file: Path | str):
         """Initialize a RegionAggregationMapping from a file.

         Parameters
         ----------
-        file : Union[Path, str]
+        file : Path | str
             Path to a yaml file which contains region aggregation information for one
             model.

@@ -276,7 +275,7 @@ def from_yaml(cls, file: Path) -> "RegionAggregationMapping":

         # Reformat the "native_regions"
         if "native_regions" in mapping_input:
-            native_region_list: List[Dict] = []
+            native_region_list: list[dict] = []
             for native_region in mapping_input["native_regions"]:
                 if isinstance(native_region, str):
                     native_region_list.append({"name": native_region})
@@ -291,7 +290,7 @@ def from_yaml(cls, file: Path) -> "RegionAggregationMapping":

         # Reformat the "common_regions"
         if "common_regions" in mapping_input:
-            common_region_list: List[Dict[str, List[Dict[str, str]]]] = []
+            common_region_list: list[dict[str, list[dict[str, str]]]] = []
             for common_region in mapping_input["common_regions"]:
                 common_region_name = list(common_region)[0]
                 common_region_list.append(
@@ -351,34 +350,34 @@ def from_excel(cls, file) -> "RegionAggregationMapping":
         )

     @property
-    def all_regions(self) -> List[str]:
+    def all_regions(self) -> list[str]:
         # For the native regions we take the **renamed** (if given) names
         nr_list = [x.target_native_region for x in self.native_regions or []]
         return nr_list + self.common_region_names

     @property
-    def model_native_region_names(self) -> List[str]:
+    def model_native_region_names(self) -> list[str]:
         # List of the **original** model native region names
         return [x.name for x in self.native_regions or []]

     @property
-    def common_region_names(self) -> List[str]:
+    def common_region_names(self) -> list[str]:
         # List of the common region names
         return [x.name for x in self.common_regions or []]

     @property
-    def rename_mapping(self) -> Dict[str, str]:
+    def rename_mapping(self) -> dict[str, str]:
         return {r.name: r.target_native_region for r in self.native_regions or []}

     @property
-    def upload_native_regions(self) -> List[str]:
+    def upload_native_regions(self) -> list[str]:
         return [
             native_region.target_native_region
             for native_region in self.native_regions or []
         ]

     @property
-    def reverse_rename_mapping(self) -> Dict[str, str]:
+    def reverse_rename_mapping(self) -> dict[str, str]:
         return {renamed: original for original, renamed in self.rename_mapping.items()}

     def check_unexpected_regions(self, df: IamDataFrame) -> None:
@@ -444,7 +443,7 @@ class RegionProcessor(Processor):

     region_codelist: RegionCodeList
     variable_codelist: VariableCodeList
-    mappings: Dict[
+    mappings: dict[
         str,
         Annotated[RegionAggregationMapping, AfterValidator(validate_with_definition)],
     ]
@@ -477,7 +476,7 @@ def from_directory(cls, path: DirectoryPath, dsd: DataStructureDefinition):
             Raised if the provided DataStructureDefinition does not contain the
             dimensions ``region`` and ``variable``.
         """
-        mapping_dict: Dict[str, RegionAggregationMapping] = {}
+        mapping_dict: dict[str, RegionAggregationMapping] = {}
         errors = ErrorCollector()

         mapping_files = [f for f in path.glob("**/*") if f.suffix in {".yaml", ".yml"}]
@@ -543,7 +542,7 @@ def apply(self, df: IamDataFrame) -> IamDataFrame:
             * If *df* contains regions that are not listed in the model mapping, or
             * If the region-processing results in an empty **IamDataFrame**.
         """
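The `from_yaml` reformatting above implies the mapping-file shape: `native_regions` entries are either plain strings or one-entry rename dicts, and each `common_regions` entry maps a common-region name to its constituents. A sketch with a hypothetical model and file:

    from nomenclature.processor.region import RegionAggregationMapping

    # mappings/model_a.yaml (hypothetical contents):
    #   model: Model A v1.0
    #   native_regions:
    #     - region_a: alt_name_a   # one-entry dict renames on upload
    #     - region_b               # plain string keeps the name
    #   common_regions:
    #     - World:
    #         - region_a
    #         - region_b
    ram = RegionAggregationMapping.from_file("mappings/model_a.yaml")
    print(ram.rename_mapping)       # {"region_a": "alt_name_a", "region_b": "region_b"}
    print(ram.common_region_names)  # ["World"]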
""" - processed_dfs: List[IamDataFrame] = [] + processed_dfs: list[IamDataFrame] = [] for model in df.model: model_df = df.filter(model=model) @@ -571,7 +570,7 @@ def apply(self, df: IamDataFrame) -> IamDataFrame: def check_region_aggregation( self, df: IamDataFrame, rtol_difference: float = 0.01 - ) -> Tuple[IamDataFrame, pd.DataFrame]: + ) -> tuple[IamDataFrame, pd.DataFrame]: """Return region aggregation results and differences between aggregated and model native data @@ -584,7 +583,7 @@ def check_region_aggregation( Returns ------- - Tuple[IamDataFrame, pd.DataFrame] + tuple[IamDataFrame, pd.DataFrame] IamDataFrame containing aggregation results and pandas dataframe containing the differences """ @@ -605,7 +604,7 @@ def _apply_region_processing( model_df: IamDataFrame, return_aggregation_difference: bool = False, rtol_difference: float = 0.01, - ) -> Tuple[IamDataFrame, pd.DataFrame]: + ) -> tuple[IamDataFrame, pd.DataFrame]: """Apply the region processing for a single model""" if len(model_df.model) != 1: raise ValueError( @@ -616,7 +615,7 @@ def _apply_region_processing( # check for regions not mentioned in the model mapping self.mappings[model].check_unexpected_regions(model_df) - _processed_data: List[pd.Series] = [] + _processed_data: list[pd.Series] = [] # silence pyam's empty filter warnings with adjust_log_level(logger="pyam", level="ERROR"): @@ -747,7 +746,7 @@ def _compare_and_merge( aggregated: pd.Series, rtol: float = 0.01, return_aggregation_difference: bool = False, -) -> Tuple[IamDataFrame, pd.DataFrame]: +) -> tuple[IamDataFrame, pd.DataFrame]: """Compare and merge original and aggregated results""" # compare processed (aggregated) data and data provided at the common-region level diff --git a/nomenclature/processor/required_data.py b/nomenclature/processor/required_data.py index 76d124c2..68826085 100644 --- a/nomenclature/processor/required_data.py +++ b/nomenclature/processor/required_data.py @@ -1,6 +1,6 @@ import logging from pathlib import Path -from typing import Any, List, Tuple, Union, Annotated +from typing import Any, Annotated import pandas as pd import yaml @@ -24,7 +24,7 @@ class RequiredMeasurand(BaseModel): variable: str - unit: List[Union[str, None]] = Field(...) + unit: list[str | None] = Field(...) 
@field_validator("unit", mode="before") @classmethod @@ -43,12 +43,12 @@ def cast_to_RequiredMeasurand(v) -> RequiredMeasurand: class RequiredData(BaseModel): measurand: ( - List[Annotated[RequiredMeasurand, BeforeValidator(cast_to_RequiredMeasurand)]] + list[Annotated[RequiredMeasurand, BeforeValidator(cast_to_RequiredMeasurand)]] | None ) = None - variable: List[str] | None = None - region: List[str] | None = None - year: List[int] | None = None + variable: list[str] | None = None + region: list[str] | None = None + year: list[int] | None = None # TODO consider merging with IamcDataFilter @field_validator("measurand", "region", "year", "variable", mode="before") @@ -99,13 +99,13 @@ def validate_with_definition(self, dsd: DataStructureDefinition) -> None: raise ValueError(error_msg) @property - def variables(self) -> List[str]: + def variables(self) -> list[str]: if self.measurand is not None: return [m.variable for m in self.measurand] return self.variable @property - def pyam_required_data_list(self) -> List[List[dict]]: + def pyam_required_data_list(self) -> list[list[dict]]: if self.measurand is not None: return [ [ @@ -132,8 +132,8 @@ def pyam_required_data_list(self) -> List[List[dict]]: def _wrong_unit_variables( self, dsd: DataStructureDefinition - ) -> List[Tuple[str, str, str]]: - wrong_units: List[Tuple[str, Any, Any]] = [] + ) -> list[tuple[str, str, str]]: + wrong_units: list[tuple[str, Any, Any]] = [] if hasattr(dsd, "variable") and self.measurand is not None: wrong_units.extend( (m.variable, unit, dsd.variable[m.variable].unit) @@ -148,8 +148,8 @@ def _wrong_unit_variables( class RequiredDataValidator(Processor): description: str | None = None - model: List[str] | None = None - required_data: List[RequiredData] + model: list[str] | None = None + required_data: list[RequiredData] file: Path @field_validator("model", mode="before") @@ -158,7 +158,7 @@ def convert_to_list(cls, v): return pyam.utils.to_list(v) @classmethod - def from_file(cls, file: Union[Path, str]) -> "RequiredDataValidator": + def from_file(cls, file: Path | str) -> "RequiredDataValidator": with open(file, "r", encoding="utf-8") as f: content = yaml.safe_load(f) return cls(file=file, **content) @@ -195,7 +195,7 @@ def apply(self, df: IamDataFrame) -> IamDataFrame: def check_required_data_per_model( self, df: IamDataFrame, model: str - ) -> List[pyam.IamDataFrame]: + ) -> list[pyam.IamDataFrame]: model_df = df.filter(model=model) missing_data = [] for requirement in self.required_data: diff --git a/nomenclature/testing.py b/nomenclature/testing.py index 4cc524de..97a052a9 100644 --- a/nomenclature/testing.py +++ b/nomenclature/testing.py @@ -1,6 +1,5 @@ import logging from pathlib import Path -from typing import List, Optional import yaml @@ -55,7 +54,7 @@ def assert_valid_yaml(path: Path): def _check_mappings( path: Path, dsd: DataStructureDefinition, - mappings: Optional[str] = None, + mappings: str | None = None, ) -> None: if mappings is None: if (path / "mappings").is_dir(): @@ -86,7 +85,7 @@ def _check_processor_directory( dsd: DataStructureDefinition, processor: Processor, processor_arg: str, - folder: Optional[str] = None, + folder: str | None = None, ) -> None: if folder is None: if (path / processor_arg).is_dir(): @@ -102,10 +101,10 @@ def _check_processor_directory( def assert_valid_structure( path: Path, definitions: str = "definitions", - mappings: Optional[str] = None, - required_data: Optional[str] = None, - validate_data: Optional[str] = None, - dimensions: Optional[List[str]] = None, + 
diff --git a/tests/test_codelist.py b/tests/test_codelist.py
index 6efa7f99..82d8564d 100644
--- a/tests/test_codelist.py
+++ b/tests/test_codelist.py
@@ -358,7 +358,8 @@ def test_RegionCodeList_filter():


 def test_RegionCodeList_hierarchy():
-    """Verifies that the hierarchy method returns a List[str]"""
+    """Verifies that the hierarchy method returns a list"""
+
     rcl = RegionCodeList.from_directory(
         "Region", MODULE_TEST_DATA_DIR / "region_to_filter_codelist"