pyhazards.benchmarks package

Submodules

pyhazards.benchmarks.base module

class pyhazards.benchmarks.base.Benchmark[source]

Bases: ABC

Shared benchmark contract for hazard evaluators.

aggregate_metrics(results)[source]
Return type: Dict[str, float]

abstractmethod evaluate(model, data, config)[source]
Return type: BenchmarkResult

export_report(result, output_dir, formats)[source]
Return type: Dict[str, str]

hazard_task: str = ''
name: str = 'benchmark'
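
A minimal subclass sketch, assuming evaluate() is the only member that must be overridden (as listed above) and that aggregate_metrics() and export_report() are inherited from the base class. The HeatwaveBenchmark name, the model.predict() call, and the metric value are illustrative only:

    from pyhazards.benchmarks.base import Benchmark
    from pyhazards.benchmarks.schemas import BenchmarkResult


    class HeatwaveBenchmark(Benchmark):
        # Hypothetical hazard task; not one of the shipped benchmarks.
        name = "heatwave"
        hazard_task = "heatwave.onset"

        def evaluate(self, model, data, config):
            # Illustrative only: assumes data is iterable and the model
            # exposes a predict() method.
            predictions = [model.predict(sample) for sample in data]
            metrics = {"accuracy": 0.0}  # replace with real metric computation
            return BenchmarkResult(
                benchmark_name=self.name,
                hazard_task=self.hazard_task,
                metrics=metrics,
                predictions=predictions,
            )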

pyhazards.benchmarks.registry module

pyhazards.benchmarks.registry.available_benchmarks()[source]
pyhazards.benchmarks.registry.build_benchmark(name)[source]
Return type: Benchmark

pyhazards.benchmarks.registry.get_benchmark(name)[source]
pyhazards.benchmarks.registry.register_benchmark(name, builder)[source]
Return type: None
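
A sketch of registering and building a benchmark through the registry. It assumes builder is a zero-argument callable returning a Benchmark instance (a class object works under that assumption); the "earthquake-v2" key is hypothetical:

    from pyhazards.benchmarks.earthquake import EarthquakeBenchmark
    from pyhazards.benchmarks.registry import (
        available_benchmarks,
        build_benchmark,
        register_benchmark,
    )

    # List whatever names are currently registered.
    print(available_benchmarks())

    # Register a builder under a new key (assumed: builder is a zero-argument
    # callable returning a Benchmark; the key "earthquake-v2" is made up here).
    register_benchmark("earthquake-v2", EarthquakeBenchmark)

    benchmark = build_benchmark("earthquake-v2")  # fresh EarthquakeBenchmark instance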

pyhazards.benchmarks.runner module

pyhazards.benchmarks.runner.resolve_benchmark(benchmark)[source]
Return type: Benchmark

pyhazards.benchmarks.runner.run_benchmark(benchmark, model, data, config, output_dir=None)[source]
Return type: BenchmarkRunSummary
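
A sketch of the runner entry point, passing a benchmark instance directly. The model, data, and config placeholders stand in for whatever the selected benchmark's evaluate() expects, which this reference does not specify:

    from pyhazards.benchmarks.runner import run_benchmark
    from pyhazards.benchmarks.wildfire import WildfireBenchmark

    model = ...    # any object the benchmark's evaluate() knows how to score
    data = ...     # evaluation dataset in the format evaluate() expects
    config = {}    # benchmark-specific configuration mapping (assumed to be a dict)

    summary = run_benchmark(WildfireBenchmark(), model, data, config)
    print(summary.benchmark_name, summary.metrics)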

pyhazards.benchmarks.schemas module

class pyhazards.benchmarks.schemas.BenchmarkResult(benchmark_name, hazard_task, metrics, predictions=<factory>, artifacts=<factory>, metadata=<factory>)[source]

Bases: object

artifacts: Dict[str, str]
benchmark_name: str
hazard_task: str
metadata: Dict[str, Any]
metrics: Dict[str, float]
predictions: List[Any]

class pyhazards.benchmarks.schemas.BenchmarkRunSummary(benchmark_name, hazard_task, metrics, report_paths=<factory>, metadata=<factory>)[source]

Bases: object

benchmark_name: str
hazard_task: str
metadata: Dict[str, Any]
metrics: Dict[str, float]
report_paths: Dict[str, str]
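
Both schema classes are plain record types whose fields are listed above; a sketch of constructing and reading a BenchmarkResult (the field values are illustrative):

    from pyhazards.benchmarks.schemas import BenchmarkResult

    result = BenchmarkResult(
        benchmark_name="flood",
        hazard_task="flood.streamflow",
        metrics={"mae": 0.42, "rmse": 0.61},   # illustrative numbers
        predictions=[1.2, 0.9, 1.7],
        metadata={"split": "test"},
    )

    print(result.metrics["rmse"])
    print(result.artifacts)   # not passed above; filled by the field's default factory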

pyhazards.benchmarks.earthquake module

class pyhazards.benchmarks.earthquake.EarthquakeBenchmark[source]

Bases: Benchmark

evaluate(model, data, config)[source]
Return type: BenchmarkResult

export_report(result, output_dir, formats)[source]
Return type: Dict[str, str]

hazard_task: str = 'earthquake.picking'
metric_names_by_task = {'earthquake.forecasting': ['mae', 'mse'], 'earthquake.picking': ['p_pick_mae', 's_pick_mae', 'precision', 'recall', 'f1']}
name: str = 'earthquake'
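
The metric_names_by_task attribute appears to map each supported hazard task to the metric keys reported for it; a short sketch of inspecting it:

    from pyhazards.benchmarks.earthquake import EarthquakeBenchmark

    bench = EarthquakeBenchmark()
    print(bench.hazard_task)   # 'earthquake.picking' by default
    for task, metric_names in bench.metric_names_by_task.items():
        print(task, metric_names)
    # earthquake.forecasting ['mae', 'mse']
    # earthquake.picking ['p_pick_mae', 's_pick_mae', 'precision', 'recall', 'f1']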

pyhazards.benchmarks.wildfire module

class pyhazards.benchmarks.wildfire.WildfireBenchmark[source]

Bases: Benchmark

evaluate(model, data, config)[source]
Return type: BenchmarkResult

hazard_task: str = 'wildfire.danger'
metric_names_by_task = {'wildfire.danger': ['accuracy', 'macro_f1', 'auc', 'pr_auc', 'mae', 'rmse'], 'wildfire.spread': ['iou', 'f1', 'burned_area_mae']}
name: str = 'wildfire'
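
aggregate_metrics(results) is inherited from Benchmark and returns a single Dict[str, float]. A sketch of combining several evaluation results, assuming the method accepts an iterable of BenchmarkResult objects (only the return type is documented here, not the aggregation rule):

    from pyhazards.benchmarks.schemas import BenchmarkResult
    from pyhazards.benchmarks.wildfire import WildfireBenchmark

    bench = WildfireBenchmark()
    fold_results = [
        BenchmarkResult("wildfire", "wildfire.danger", {"accuracy": 0.81, "macro_f1": 0.74}),
        BenchmarkResult("wildfire", "wildfire.danger", {"accuracy": 0.79, "macro_f1": 0.72}),
    ]
    # Assumed input type: a list of BenchmarkResult; illustrative metric values.
    combined = bench.aggregate_metrics(fold_results)
    print(combined)  # Dict[str, float]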

pyhazards.benchmarks.flood module

class pyhazards.benchmarks.flood.FloodBenchmark[source]

Bases: Benchmark

evaluate(model, data, config)[source]
Return type: BenchmarkResult

hazard_task: str = 'flood.streamflow'
metric_names_by_task = {'flood.inundation': ['pixel_mae', 'iou', 'f1'], 'flood.streamflow': ['mae', 'rmse', 'nse', 'kge']}
name: str = 'flood'
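
A sketch combining FloodBenchmark with the runner's output_dir argument. The assumption is that passing output_dir makes the run write reports whose paths appear in BenchmarkRunSummary.report_paths; the model, data, and config placeholders are not specified by this reference:

    from pyhazards.benchmarks.flood import FloodBenchmark
    from pyhazards.benchmarks.runner import run_benchmark

    model = ...    # streamflow model; interface defined by FloodBenchmark.evaluate()
    data = ...     # evaluation dataset
    config = {}    # assumed configuration mapping

    summary = run_benchmark(FloodBenchmark(), model, data, config,
                            output_dir="reports/flood")
    print(summary.metrics)        # e.g. mae, rmse, nse, kge for flood.streamflow
    print(summary.report_paths)   # assumed: report format -> written file path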

pyhazards.benchmarks.tc module

class pyhazards.benchmarks.tc.TropicalCycloneBenchmark[source]

Bases: Benchmark

evaluate(model, data, config)[source]
Return type: BenchmarkResult

hazard_task: str = 'tc.track_intensity'
metric_names_by_task = {'tc.track_intensity': ['track_error', 'intensity_mae']}
name: str = 'tc'
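
export_report(result, output_dir, formats) is inherited from Benchmark and returns a Dict[str, str]. A sketch of exporting a result directly; treating formats as a sequence of format names (e.g. "json") and the returned mapping as format-to-path are both assumptions, since only the signature and return type are documented:

    from pyhazards.benchmarks.schemas import BenchmarkResult
    from pyhazards.benchmarks.tc import TropicalCycloneBenchmark

    bench = TropicalCycloneBenchmark()
    result = BenchmarkResult(
        benchmark_name=bench.name,
        hazard_task=bench.hazard_task,
        metrics={"track_error": 95.0, "intensity_mae": 7.5},  # illustrative numbers
    )
    # Assumed: formats is a sequence of format identifiers such as "json".
    paths = bench.export_report(result, "reports/tc", formats=["json"])
    print(paths)  # Dict[str, str], assumed format -> output path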

Module contents

The package root re-exports the public API documented in the submodules above: the Benchmark abstract base class, the BenchmarkResult and BenchmarkRunSummary dataclasses, the EarthquakeBenchmark, FloodBenchmark, TropicalCycloneBenchmark, and WildfireBenchmark evaluators, and the registry and runner helpers available_benchmarks(), build_benchmark(name), get_benchmark(name), register_benchmark(name, builder), and run_benchmark(benchmark, model, data, config, output_dir=None). All of these can be imported directly from pyhazards.benchmarks; see the corresponding submodule entries for signatures and return types.