diff --git a/pyproject.toml b/pyproject.toml
index eb308926..40a909b1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,7 +12,7 @@ authors = [
 requires-python = ">=3.8"
 keywords = ["geospatial", "evaluations"]
 license = {text = "MIT"}
-version = "0.2.5"
+version = "0.2.6"
 dynamic = ["readme", "dependencies"]
 
 [project.optional-dependencies]
diff --git a/requirements.txt b/requirements.txt
index ff5699f7..ac3ed7ac 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,4 +14,4 @@ flox==0.7.2
 xskillscore==0.0.24
 pyogrio==0.7.2
 pystac-client==0.7.5
-stackstac==0.5.0
+s3fs<=2023.12.1
diff --git a/src/gval/accessors/gval_xarray.py b/src/gval/accessors/gval_xarray.py
index 4d753d1a..a2631009 100644
--- a/src/gval/accessors/gval_xarray.py
+++ b/src/gval/accessors/gval_xarray.py
@@ -93,15 +93,18 @@ def __handle_attribute_tracking(
 
         else:
             del attribute_tracking_kwargs["agreement_map"]
 
-        results = candidate_map.gval.attribute_tracking_xarray(
+        results = _attribute_tracking_xarray(
+            candidate_map=candidate_map,
             benchmark_map=benchmark_map,
             agreement_map=agreement_map,
             **attribute_tracking_kwargs,
         )
     else:
-        results = candidate_map.gval.attribute_tracking_xarray(
-            benchmark_map=benchmark_map, agreement_map=agreement_map
+        results = _attribute_tracking_xarray(
+            candidate_map=candidate_map,
+            benchmark_map=benchmark_map,
+            agreement_map=agreement_map,
         )
 
     return results
diff --git a/src/gval/comparison/tabulation.py b/src/gval/comparison/tabulation.py
index ecc856c5..34df9a59 100644
--- a/src/gval/comparison/tabulation.py
+++ b/src/gval/comparison/tabulation.py
@@ -243,6 +243,15 @@ def _crosstab_Datasets(agreement_map: xr.DataArray) -> DataFrame[Crosstab_df]:
     # loop variables
     previous_crosstab_df = None  # initializing to avoid having unset
     for i, b in enumerate(agreement_variable_names):
+        # Pass pairing dictionary to variable if necessary
+        if (
+            agreement_map[b].attrs.get("pairing_dictionary") is None
+            and agreement_map.attrs.get("pairing_dictionary") is not None
+        ):
+            agreement_map[b].attrs["pairing_dictionary"] = agreement_map.attrs[
+                "pairing_dictionary"
+            ]
+
         crosstab_df = _crosstab_2d_DataArrays(
             agreement_map=agreement_map[b], band_value=b
         )
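
The tabulation hunk above copies a dataset-level "pairing_dictionary" attribute down to each variable before cross-tabulating, since _crosstab_2d_DataArrays reads the attribute from the DataArray it receives. A minimal sketch of the same pattern on a toy xarray Dataset (illustrative only, not part of the patch):

import numpy as np
import xarray as xr

ds = xr.Dataset(
    {
        "band_1": (("y", "x"), np.zeros((2, 2))),
        "band_2": (("y", "x"), np.ones((2, 2))),
    },
    attrs={"pairing_dictionary": {(0, 0): 0, (1, 1): 1}},
)

for var in ds.data_vars:
    # Variable-level attrs win; only fall back to the dataset-level attribute
    if (
        ds[var].attrs.get("pairing_dictionary") is None
        and ds.attrs.get("pairing_dictionary") is not None
    ):
        ds[var].attrs["pairing_dictionary"] = ds.attrs["pairing_dictionary"]
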
diff --git a/src/gval/utils/loading_datasets.py b/src/gval/utils/loading_datasets.py
index 2c9b7934..dd73b87f 100644
--- a/src/gval/utils/loading_datasets.py
+++ b/src/gval/utils/loading_datasets.py
@@ -4,20 +4,19 @@
 
 __author__ = "Fernando Aristizabal"
 
-import warnings
 from typing import Union, Optional, Tuple, Iterable
 from numbers import Number
 import ast
+from collections import Counter
 
+import pandas as pd
 import rioxarray as rxr
 import xarray as xr
 import numpy as np
 from tempfile import NamedTemporaryFile
 from rio_cogeo.cogeo import cog_translate
 from rio_cogeo.profiles import cog_profiles
-import pystac_client
-
-import stackstac
+from pystac.item_collection import ItemCollection
 
 
 _MEMORY_STRATEGY = "normal"
@@ -238,205 +237,103 @@ def _convert_to_dataset(xr_object=Union[xr.DataArray, xr.Dataset]) -> xr.Dataset
     return xr_object
 
 
-def _get_raster_band_nodata(band_metadata, nodata_fill) -> Number:
-    """
-    Extracts nodata information from STAC APIs that implement Raster Extension
+def stac_to_df(
+    stac_items: ItemCollection,
+    assets: list = None,
+    attribute_allow_list: list = None,
+    attribute_block_list: list = None,
+) -> pd.DataFrame:
+    """Convert STAC Items into a DataFrame
 
     Parameters
     ----------
-    band_metadata : list
-        Metadata from raster:bands extension
-    nodata_fill : Number
-        Fill in value for missing data
+    stac_items: ItemCollection
+        STAC Item Collection returned from pystac client
+    assets : list, default = None
+        Assets to keep (keep all if None)
+    attribute_allow_list: list, default = None
+        List of columns to allow in the result DataFrame
+    attribute_block_list: list, default = None
+        List of columns to remove in the result DataFrame
 
     Returns
     -------
-    Number
-        Number representing nodata
+    pd.DataFrame
+        A DataFrame with rows for each unique item/asset combination
 
     Raises
     ------
     ValueError
+        Allow and block lists should be mutually exclusive
+    ValueError
+        No entries in DataFrame due to nonexistent asset
+    ValueError
+        There are no assets in this query to run a catalog comparison
 
     """
-    if band_metadata:
-        prop_string = str(band_metadata.coords["raster:bands"].values)
-        idx1, idx2 = prop_string.find("{"), prop_string.rfind("}")
+    item_dfs, compare_idx = [], 1
 
-        return ast.literal_eval(prop_string[idx1 : idx2 + 1]).get("nodata")
-    else:
-        if nodata_fill is None:
-            raise ValueError(
-                "Must have nodata fill value if nodata is not present in metadata"
+    # Check for mutually exclusive lists
+    if (
+        len(
+            list(
+                (
+                    Counter(attribute_allow_list) & Counter(attribute_block_list)
+                ).elements()
            )
-
-    return nodata_fill
-
-
-def _set_nodata(
-    stack: xr.DataArray, band_metadata: list = None, nodata_fill: Number = None
-) -> Number:
-    """
-    Sets nodata information from STAC APIs that implement Raster Extension
-
-    Parameters
-    ----------
-    stack : xr.DataArray
-        Data to set nodata attribute
-    band_metadata : list
-        Metadata from raster:bands extension
-    nodata_fill : Number
-        Fill in value for missing data
-
-    """
-
-    if stack.rio.nodata is not None:
-        stack.rio.write_nodata(stack.rio.nodata, inplace=True)
-    elif stack.rio.encoded_nodata is not None:
-        stack.rio.write_nodata(stack.rio.encoded_nodata, inplace=True)
-    else:
-        stack.rio.write_nodata(
-            _get_raster_band_nodata(band_metadata, nodata_fill), inplace=True
+        )
+        > 0
+    ):
+        raise ValueError(
+            "Attribute allow and block lists must be mutually exclusive"
         )
 
+    # Iterate through each STAC Item and make a unique row for each asset
+    for item in stac_items:
+        item_dict = item.to_dict()
+        item_df = pd.json_normalize(item_dict)
+        mask = item_df.columns.str.contains("assets.*")
+        og_df = item_df.loc[:, ~mask]
+
+        if (
+            assets is not None
+            and np.sum([asset not in item_dict["assets"].keys() for asset in assets])
+            > 0
+        ):
+            raise ValueError("Nonexistent asset in parameter assets")
+
+        dfs = []
+
+        # Make a unique row for each asset
+        for key, val in item_dict["assets"].items():
+            if assets is None or key in assets:
+                df = pd.json_normalize(val)
+                df["asset"] = key
+                df["compare_id"] = compare_idx
+                df["map_id"] = val["href"]
+                compare_idx += 1
+                concat_df = pd.concat([og_df, df], axis=1)
+                dfs.append(concat_df.loc[:, ~concat_df.columns.duplicated()])
+
+        if len(dfs) < 1:
+            raise ValueError(
+                "There are no assets in this query to run a catalog comparison. "
+                "Please revisit original query."
+            )
 
-
-def _set_crs(stack: xr.DataArray, band_metadata: list = None) -> Number:
-    """
-
-    Parameters
-    ----------
-    stack : xr.DataArray
-        Original data with no information
-    band_metadata : dict
-        Information with band metadata
-
-    Returns
-    -------
-    Xarray DataArray with proper CRS
-
-    """
-
-    if stack.rio.crs is not None:
-        return stack.rio.write_crs(stack.rio.crs)
-    else:
-        return stack.rio.write_crs(f"EPSG:{band_metadata['epsg'].values}")
-
-
-def get_stac_data(
-    url: str,
-    collection: str,
-    time: str,
-    bands: list = None,
-    query: str = None,
-    time_aggregate: str = None,
-    max_items: int = None,
-    intersects: dict = None,
-    bbox: list = None,
-    resolution: int = None,
-    nodata_fill: Number = None,
-) -> xr.Dataset:
-    """
-
-    Parameters
-    ----------
-    url : str
-        Address hosting the STAC API
-    collection : str
-        Name of collection to get (currently limited to one)
-    time : str
-        Single or range of values to query in the time dimension
-    bands: list, default = None
-        Bands to retrieve from service
-    query : str, default = None
-        String command to filter data
-    time_aggregate : str, default = None
-        Method to aggregate multiple time stamps
-    max_items : int, default = None
-        The maximum amount of records to retrieve
-    intersects : dict, default = None
-        Dictionary representing the type of geometry and its respective coordinates
-    bbox : list, default = None
-        Coordinates to filter the spatial range of request
-    resolution : int, default = 10
-        Resolution to get data from
-    nodata_fill : Number, default = None
-        Value to fill nodata where not present
+        item_dfs.append(pd.concat(dfs, ignore_index=True))
 
-    Returns
-    -------
-    xr.Dataset
-        Xarray object with resepective STAC API data
+    # Concatenate the DataFrames and remove unwanted columns if allow and block lists exist
+    catalog_df = pd.concat(item_dfs, ignore_index=True)
 
-    """
+    if attribute_allow_list is not None:
+        catalog_df = catalog_df[attribute_allow_list]
 
-    with warnings.catch_warnings():
-        warnings.simplefilter("ignore")
-        # Call cataloging url, search, and convert to xarray
-        catalog = pystac_client.Client.open(url)
-
-        stac_items = catalog.search(
-            datetime=time,
-            collections=[collection],
-            max_items=max_items,
-            intersects=intersects,
-            bbox=bbox,
-            query=query,
-        ).get_all_items()
-
-    stack = stackstac.stack(stac_items, resolution=resolution)
-
-    # Only get unique time indices in case there are duplicates
-    _, idxs = np.unique(stack.coords["time"], return_index=True)
-    stack = stack[idxs]
-
-    # Aggregate if there is more than one time
-    if stack.coords["time"].shape[0] > 1:
-        crs = stack.rio.crs
-        if time_aggregate == "mean":
-            stack = stack.mean(dim="time")
-            stack.attrs["time_aggregate"] = "mean"
-        elif time_aggregate == "min":
-            stack = stack.min(dim="time")
-            stack.attrs["time_aggregate"] = "min"
-        elif time_aggregate == "max":
-            stack = stack.max(dim="time")
-            stack.attrs["time_aggregate"] = "max"
-        else:
-            raise ValueError("A valid aggregate must be used for time ranges")
-
-        stack.rio.write_crs(crs, inplace=True)
-    else:
-        stack = stack[0]
-        stack.attrs["time_aggregate"] = "none"
+    if attribute_block_list is not None:
+        catalog_df = catalog_df.drop(attribute_block_list, axis=1)
 
-    # Select specific bands
-    if bands is not None:
-        bands = [bands] if isinstance(bands, str) else bands
-        stack = stack.sel({"band": bands})
-
-    band_metadata = (
-        stack.coords["raster:bands"] if "raster:bands" in stack.coords else None
-    )
-    if "band" in stack.dims:
-        og_names = [name for name in stack.coords["band"]]
-        names = [f"band_{x + 1}" for x in range(len(stack.coords["band"]))]
-        stack = stack.assign_coords({"band": names}).to_dataset(dim="band")
-
-        for metadata, var, og_var in zip(band_metadata, stack.data_vars, og_names):
-            _set_nodata(stack[var], metadata, nodata_fill)
-            stack[var] = _set_crs(stack[var], band_metadata)
-            stack[var].attrs["original_name"] = og_var
-
-    else:
-        stack = stack.to_dataset(name="band_1")
-        _set_nodata(stack["band_1"], band_metadata, nodata_fill)
-        stack["band_1"] = _set_crs(stack["band_1"])
-        stack["band_1"].attrs["original_name"] = (
-            bands[0] if isinstance(bands, list) else bands
-        )
 
-    return stack
+    return catalog_df
 
 
 def _create_circle_mask(
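
The stac_to_df helper above replaces the removed stackstac-based get_stac_data path. A minimal usage sketch (illustrative only, not part of the patch; the endpoint, collection, bbox, and asset mirror the test fixtures added below):

import pystac_client

from gval.utils.loading_datasets import stac_to_df

# Query a STAC API with pystac-client, then flatten the ItemCollection into
# one row per item/asset pair
catalog = pystac_client.Client.open("https://earth-search.aws.element84.com/v1")
items = catalog.search(
    datetime="2020-04-01",
    collections=["sentinel-2-l2a"],
    bbox=[-105.78, 35.79, -105.72, 35.84],
).item_collection()

catalog_df = stac_to_df(stac_items=items, assets=["aot"])
# Each row carries the asset href as "map_id" and a sequential "compare_id",
# the columns catalog_compare later joins on
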
diff --git a/tests/cases_catalogs.py b/tests/cases_catalogs.py
index e216d980..1848bad3 100644
--- a/tests/cases_catalogs.py
+++ b/tests/cases_catalogs.py
@@ -461,3 +461,77 @@ def case_compare_catalogs_fail(
         agreement_map_field,
         expected_exception,
     )
+
+
+url = "https://earth-search.aws.element84.com/v1"
+collection = "sentinel-2-l2a"
+times = ["2020-04-01", "2020-04-03"]
+bbox = [-105.78, 35.79, -105.72, 35.84]
+assets = ["aot"]
+
+expected_stac_df = [
+    f"{TEST_DATA_DIR}/expected_catalog_df_no_filter.pkl",
+    f"{TEST_DATA_DIR}/expected_catalog_df_allow_list.pkl",
+    f"{TEST_DATA_DIR}/expected_catalog_df_block_list.pkl",
+]
+
+allow_list = [None, ["map_id", "compare_id"], None]
+block_list = [None, None, ["properties.mgrs:utm_zone"]]
+
+
+@parametrize(
+    "url, collection, times, bbox, assets, allow_list, block_list, expected_catalog_df",
+    list(
+        zip(
+            [url] * 3,
+            [collection] * 3,
+            [times] * 3,
+            [bbox] * 3,
+            [assets] * 3,
+            allow_list,
+            block_list,
+            expected_stac_df,
+        )
+    ),
+)
+def case_stac_catalog_comparison_success(
+    url, collection, times, bbox, assets, allow_list, block_list, expected_catalog_df
+):
+    return (
+        url,
+        collection,
+        times,
+        bbox,
+        assets,
+        allow_list,
+        block_list,
+        pd.read_pickle(expected_catalog_df),
+    )
+
+
+bad_times = ["2020-04-01"]
+bad_assets = [["surface_water"], None, None, None]
+exceptions = [ValueError, ValueError, KeyError, KeyError]
+bad_allow_list = [None, ["arb"], ["arb"], None]
+bad_block_list = [None, ["arb"], None, ["arb"]]
+
+
+@parametrize(
+    "url, collection, time, bbox, assets, allow_list, block_list, exception",
+    list(
+        zip(
+            [url] * len(bad_assets),
+            [collection] * len(bad_assets),
+            bad_times * len(bad_assets),
+            [bbox] * len(bad_assets),
+            bad_assets,
+            bad_allow_list,
+            bad_block_list,
+            exceptions,
+        )
+    ),
+)
+def case_stac_catalog_comparison_fail(
+    url, collection, time, bbox, assets, allow_list, block_list, exception
+):
+    return url, collection, time, bbox, assets, allow_list, block_list, exception
diff --git a/tests/cases_stac.py b/tests/cases_stac.py
deleted file mode 100644
index 272425fc..00000000
--- a/tests/cases_stac.py
+++ /dev/null
@@ -1,193 +0,0 @@
-"""
-Test functionality for gval/statistics modules
-"""
-
-# __all__ = ['*']
-
-
-import numpy as np
-import pandas as pd
-from pytest_cases import parametrize
-
-url = "https://earth-search.aws.element84.com/v1"
-collection = "sentinel-2-l2a"
-bbox = [-105.78, 35.79, -105.72, 35.84]
-
-
-bands = ["aot", ["aot"], ["aot"], ["aot"], ["aot", "rededge1"]]
-
-
-time = [
-    ["2020-04-01", "2020-04-03"],
-    ["2020-04-01/2020-04-03", "2020-04-06/2020-04-08"],
-    ["2020-04-01/2020-04-03", "2020-04-06/2020-04-08"],
-    ["2020-04-01/2020-04-03", "2020-04-06/2020-04-08"],
-    ["2020-04-01", "2020-04-03"],
-]
-
-time_aggreagte = [None, "mean", "min", "max", None]
-
-nodata_fill = [None, None, None, None, 0]
-
-expected_df = [
-    {
-        "band": ["1"],
-        "coefficient_of_determination": [-1.4498682989135663],
-        "mean_absolute_error": [0.025393391237534115],
-        "mean_absolute_percentage_error": [0.20440844788169352],
-        "mean_normalized_mean_absolute_error": [0.20592052600829783],
-        "mean_normalized_root_mean_squared_error": [0.2110757427848459],
-        "mean_percentage_error": [-0.20540028455073725],
-        "mean_signed_error": [-0.025329236900295846],
-        "mean_squared_error": [0.0006775147935436701],
-        "range_normalized_mean_absolute_error": [0.31741739046917644],
-        "range_normalized_root_mean_squared_error": [0.32536392930255564],
-        "root_mean_squared_error": [0.026029114344204452],
-        "symmetric_mean_absolute_percentage_error": [0.22778286629345662],
-    },
-    {
-        "band": ["1"],
-        "coefficient_of_determination": [0.5954048574471591],
-        "mean_absolute_error": [0.011375152039973328],
-        "mean_absolute_percentage_error": [0.09299193429857326],
-        "mean_normalized_mean_absolute_error": [0.09198102377977661],
-        "mean_normalized_root_mean_squared_error": [0.0950603218421001],
-        "mean_percentage_error": [-0.03739148233378963],
-        "mean_signed_error": [-0.004624147232424582],
-        "mean_squared_error": [0.0001382026920448174],
-        "range_normalized_mean_absolute_error": [0.10036898858799995],
-        "range_normalized_root_mean_squared_error": [0.10372909504665788],
-        "root_mean_squared_error": [0.011755964105287894],
-        "symmetric_mean_absolute_percentage_error": [0.09373343991103603],
-    },
-    {
-        "band": ["1"],
-        "coefficient_of_determination": [0.62257119465569],
-        "mean_absolute_error": [0.017518999671533943],
-        "mean_absolute_percentage_error": [0.19568589409159515],
-        "mean_normalized_mean_absolute_error": [0.15469671510079602],
-        "mean_normalized_root_mean_squared_error": [0.1715846316993916],
-        "mean_percentage_error": [0.013368590460465646],
-        "mean_signed_error": [0.0015139580160649765],
-        "mean_squared_error": [0.0003775836662784796],
-        "range_normalized_mean_absolute_error": [0.11450326582701924],
-        "range_normalized_root_mean_squared_error": [0.12700334769555513],
-        "root_mean_squared_error": [0.019431512197419933],
-        "symmetric_mean_absolute_percentage_error": [0.15366954251075923],
-    },
-    {
-        "band": ["1"],
-        "coefficient_of_determination": [-0.41271964773267755],
-        "mean_absolute_error": [0.011908415300546453],
-        "mean_absolute_percentage_error": [0.0882205870282268],
-        "mean_normalized_mean_absolute_error": [0.08814209475433238],
-        "mean_normalized_root_mean_squared_error": [0.08996232968540896],
-        "mean_percentage_error": [-0.08717281243113986],
-        "mean_signed_error": [-0.011777460658723765],
-        "mean_squared_error": [0.00014772792439308438],
-        "range_normalized_mean_absolute_error": [0.16772415916262606],
-        "range_normalized_root_mean_squared_error": [0.17118785462101266],
-        "root_mean_squared_error": [0.0121543376780919],
-        "symmetric_mean_absolute_percentage_error": [0.09215897319648417],
-    },  # One more for a multi band example
-    {
-        "band": ["1", "2"],
-        "coefficient_of_determination": [-1.4498682989135663, -0.7514062048150743],
-        "mean_absolute_error": [0.025393391237534115, 0.10266655092378396],
-        "mean_absolute_percentage_error": [0.20440844788169352, np.inf],
-        "mean_normalized_mean_absolute_error": [
-            0.20592052600829783,
-            1.5904259451440834,
-        ],
-        "mean_normalized_root_mean_squared_error": [
-            0.2110757427848459,
-            2.8406238608292003,
-        ],
-        "mean_percentage_error": [-0.20540028455073725, 0.8574267097258231],
-        "mean_signed_error": [-0.025329236900295846, 0.05534935042165941],
-        "mean_squared_error": [0.0006775147935436701, 0.03362470648587737],
-        "range_normalized_mean_absolute_error": [
-            0.31741739046917644,
-            0.06341747539921179,
-        ],
-        "range_normalized_root_mean_squared_error": [
-            0.32536392930255564,
-            0.11326852052594609,
-        ],
-        "root_mean_squared_error": [0.026029114344204452, 0.18337040787945413],
-        "symmetric_mean_absolute_percentage_error": [
-            0.22778286629345662,
-            0.8343404150271029,
-        ],
-    },
-]
-
-
-@parametrize(
-    "url, collection, bbox, time, bands, time_aggregate, nodata_fill, expected_df",
-    list(
-        zip(
-            [url] * len(time),
-            [collection] * len(time),
-            [bbox] * len(time),
-            time,
-            bands,
-            time_aggreagte,
-            nodata_fill,
-            expected_df,
-        )
-    ),
-)
-def case_stac_api_call(
-    url, collection, bbox, time, bands, time_aggregate, nodata_fill, expected_df
-):
-    return (
-        url,
-        collection,
-        bbox,
-        time,
-        bands,
-        time_aggregate,
-        nodata_fill,
-        pd.DataFrame(expected_df),
-    )
-
-
-bands_fail = [["aot"], ["aot"], ["red"]]
-
-time_fail = [
-    ["1945-04-01", "1945-04-03"],
-    ["2020-04-01/2020-04-03", "2020-04-06/2020-04-08"],
-    ["2020-04-01", "2020-04-03"],
-]
-
-time_aggreagte_fail = [None, None, None]
-
-nodata_fill_fail = [
-    None,
-    None,
-    None,
-]
-
-exceptions = [ValueError] * 3
-
-
-@parametrize(
-    "url, collection, bbox, time, bands, time_aggregate, nodata_fill, exception",
-    list(
-        zip(
-            [url] * len(time),
-            [collection] * len(time),
-            [bbox] * len(time),
-            time_fail,
-            bands_fail,
-            time_aggreagte_fail,
-            nodata_fill_fail,
-            exceptions,
-        )
-    ),
-)
-def case_stac_api_call_fail(
-    url, collection, bbox, time, bands, time_aggregate, nodata_fill, exception
-):
-    return url, collection, bbox, time, bands, time_aggregate, nodata_fill, exception
diff --git a/tests/test_catalogs.py b/tests/test_catalogs.py
index a67ac991..3c5fdbb8 100644
--- a/tests/test_catalogs.py
+++ b/tests/test_catalogs.py
@@ -9,9 +9,11 @@
 import dask.dataframe as dd
 import rioxarray as rxr
 import xarray as xr
+import pystac_client
 
 from tests.conftest import _attributes_to_string
 from gval.catalogs.catalogs import catalog_compare
+from gval.utils.loading_datasets import stac_to_df
 
 
 @parametrize_with_cases(
@@ -219,3 +221,88 @@ def test_compare_catalogs_fail(
             open_kwargs=open_kwargs,
             agreement_map_field=agreement_map_field,
         )
+
+
+@parametrize_with_cases(
+    "url, collection, times, bbox, assets, allow_list, block_list, expected_catalog_df",
+    glob="stac_catalog_comparison_success",
+)
+def test_stac_catalog_comparison_success(
+    url, collection, times, bbox, assets, allow_list, block_list, expected_catalog_df
+):
+    catalog = pystac_client.Client.open(url)
+
+    candidate_items = catalog.search(
+        datetime=times[0],
+        collections=[collection],
+        bbox=bbox,
+    ).item_collection()
+
+    candidate_catalog = stac_to_df(
+        stac_items=candidate_items,
+        assets=assets,
+        attribute_allow_list=allow_list,
+        attribute_block_list=block_list,
+    )
+
+    benchmark_items = catalog.search(
+        datetime=times[1],
+        collections=[collection],
+        bbox=bbox,
+    ).item_collection()
+
+    benchmark_catalog = stac_to_df(
+        stac_items=benchmark_items,
+        assets=assets,
+        attribute_allow_list=allow_list,
+        attribute_block_list=block_list,
+    )
+
+    arguments = {
+        "candidate_catalog": candidate_catalog,
+        "benchmark_catalog": benchmark_catalog,
+        "on": "compare_id",
+        "map_ids": "map_id",
+        "how": "inner",
+        "compare_type": "continuous",
+        "compare_kwargs": {
+            "metrics": (
+                "coefficient_of_determination",
+                "mean_absolute_error",
+                "mean_absolute_percentage_error",
+            ),
+            "encode_nodata": True,
+            "nodata": -9999,
+        },
+        "open_kwargs": {"mask_and_scale": True, "masked": True},
+    }
+
+    stac_clog = catalog_compare(**arguments)
+
+    pd.testing.assert_frame_equal(
+        stac_clog, expected_catalog_df, check_dtype=False, check_index_type=False
+    ), "Computed catalog did not match the expected catalog df"
+
+
+@parametrize_with_cases(
+    "url, collection, time, bbox, assets, allow_list, block_list, exception",
+    glob="stac_catalog_comparison_fail",
+)
+def test_stac_catalog_comparison_fail(
+    url, collection, time, bbox, assets, allow_list, block_list, exception
+):
+    with raises(exception):
+        catalog = pystac_client.Client.open(url)
+
+        candidate_items = catalog.search(
+            datetime=time,
+            collections=[collection],
+            bbox=bbox,
+        ).item_collection()
+
+        _ = stac_to_df(
+            stac_items=candidate_items,
+            assets=assets,
+            attribute_allow_list=allow_list,
+            attribute_block_list=block_list,
+        )
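
The end-to-end flow the new tests exercise, condensed into a standalone sketch (illustrative only, not part of the patch; parameters mirror the fixtures in tests/cases_catalogs.py):

import pystac_client

from gval.catalogs.catalogs import catalog_compare
from gval.utils.loading_datasets import stac_to_df

catalog = pystac_client.Client.open("https://earth-search.aws.element84.com/v1")

# Build candidate and benchmark catalogs from two datetimes over the same bbox
def build_catalog(datetime):
    items = catalog.search(
        datetime=datetime,
        collections=["sentinel-2-l2a"],
        bbox=[-105.78, 35.79, -105.72, 35.84],
    ).item_collection()
    return stac_to_df(stac_items=items, assets=["aot"])

# Join the two catalogs on "compare_id", open each "map_id" raster, and run a
# continuous comparison per matched pair
agreement_df = catalog_compare(
    candidate_catalog=build_catalog("2020-04-01"),
    benchmark_catalog=build_catalog("2020-04-03"),
    on="compare_id",
    map_ids="map_id",
    how="inner",
    compare_type="continuous",
    compare_kwargs={
        "metrics": ("coefficient_of_determination", "mean_absolute_error"),
        "encode_nodata": True,
        "nodata": -9999,
    },
    open_kwargs={"mask_and_scale": True, "masked": True},
)
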
"open_kwargs": {"mask_and_scale": True, "masked": True}, + } + + stac_clog = catalog_compare(**arguments) + + pd.testing.assert_frame_equal( + stac_clog, expected_catalog_df, check_dtype=False, check_index_type=False + ), "Computed catalog did not match the expected catalog df" + + +@parametrize_with_cases( + "url, collection, time, bbox, assets, allow_list, block_list, exception", + glob="stac_catalog_comparison_fail", +) +def test_stac_catalog_comparison_fail( + url, collection, time, bbox, assets, allow_list, block_list, exception +): + with raises(exception): + catalog = pystac_client.Client.open(url) + + candidate_items = catalog.search( + datetime=time, + collections=[collection], + bbox=bbox, + ).item_collection() + + _ = stac_to_df( + stac_items=candidate_items, + assets=assets, + attribute_allow_list=allow_list, + attribute_block_list=block_list, + ) diff --git a/tests/test_stac.py b/tests/test_stac.py deleted file mode 100644 index 3af1d0ba..00000000 --- a/tests/test_stac.py +++ /dev/null @@ -1,91 +0,0 @@ -from pytest_cases import parametrize_with_cases -from pytest import raises -import xarray as xr -import pandas as pd - -from gval.utils.loading_datasets import get_stac_data - - -@parametrize_with_cases( - "url, collection, bbox, time, bands, time_aggregate, nodata_fill, expected_df", - glob="stac_api_call", -) -def test_stac_api_call( - url, collection, bbox, time, bands, time_aggregate, nodata_fill, expected_df -): - """ - Tests call for stac API, (IDK if this data can be mocked, API calls in unit tests are dubious) - """ - - candidate = get_stac_data( - url=url, - collection=collection, - time=time[0], - bands=bands, - bbox=bbox, - time_aggregate=time_aggregate, - nodata_fill=nodata_fill, - ) - - benchmark = get_stac_data( - url=url, - collection=collection, - time=time[1], - bands=bands, - bbox=bbox, - time_aggregate=time_aggregate, - nodata_fill=nodata_fill, - ) - - # Registered metrics from previous tests affect this comparison so metrics are explicit - agreement, metrics = candidate.gval.continuous_compare( - benchmark, - metrics=[ - "coefficient_of_determination", - "mean_absolute_error", - "mean_absolute_percentage_error", - "mean_normalized_mean_absolute_error", - "mean_normalized_root_mean_squared_error", - "mean_percentage_error", - "mean_signed_error", - "mean_squared_error", - "range_normalized_mean_absolute_error", - "range_normalized_root_mean_squared_error", - "root_mean_squared_error", - "symmetric_mean_absolute_percentage_error", - ], - ) - - bnds = [bands] if isinstance(bands, str) else bands - - assert isinstance(agreement, xr.Dataset) - assert bnds == [ - agreement[var].attrs["original_name"] for var in agreement.data_vars - ] - - pd.testing.assert_frame_equal( - metrics, expected_df, check_dtype=False - ), "Compute statistics did not return expected values" - - -@parametrize_with_cases( - "url, collection, bbox, time, bands, time_aggregate, nodata_fill, exception", - glob="stac_api_call_fail", -) -def test_stac_api_call_fail( - url, collection, bbox, time, bands, time_aggregate, nodata_fill, exception -): - """ - Tests call for stac API fail - """ - - with raises(exception): - _ = get_stac_data( - url=url, - collection=collection, - time=time[0], - bands=bands, - bbox=bbox, - time_aggregate=time_aggregate, - nodata_fill=nodata_fill, - )