diff --git a/CHANGELOG.md b/CHANGELOG.md
index d7362c938cb..6695a09db43 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -42,6 +42,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 - Fixed multiclass recall macro avg. ignore index ([#2710](https://github.com/Lightning-AI/torchmetrics/pull/2710))
 
+- Fixed handling zero division error in binary IoU (Jaccard index) calculation ([#2726](https://github.com/Lightning-AI/torchmetrics/pull/2726))
+
+
 - Correct the padding related calculation errors in SSIM ([#2721](https://github.com/Lightning-AI/torchmetrics/pull/2721))
 
 
diff --git a/src/torchmetrics/functional/classification/jaccard.py b/src/torchmetrics/functional/classification/jaccard.py
index 1d240df68af..dfddd68255f 100644
--- a/src/torchmetrics/functional/classification/jaccard.py
+++ b/src/torchmetrics/functional/classification/jaccard.py
@@ -67,7 +67,7 @@ def _jaccard_index_reduce(
         raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
     confmat = confmat.float()
     if average == "binary":
-        return confmat[1, 1] / (confmat[0, 1] + confmat[1, 0] + confmat[1, 1])
+        return _safe_divide(confmat[1, 1], (confmat[0, 1] + confmat[1, 0] + confmat[1, 1]), zero_division=zero_division)
 
     ignore_index_cond = ignore_index is not None and 0 <= ignore_index < confmat.shape[0]
     multilabel = confmat.ndim == 3
diff --git a/tests/unittests/classification/test_jaccard.py b/tests/unittests/classification/test_jaccard.py
index 6901868eac9..e7afdb557a6 100644
--- a/tests/unittests/classification/test_jaccard.py
+++ b/tests/unittests/classification/test_jaccard.py
@@ -26,6 +26,7 @@
     MultilabelJaccardIndex,
 )
 from torchmetrics.functional.classification.jaccard import (
+    _jaccard_index_reduce,
     binary_jaccard_index,
     multiclass_jaccard_index,
     multilabel_jaccard_index,
@@ -403,6 +404,26 @@ def test_corner_case():
     assert torch.allclose(res, out)
 
 
+def test_jaccard_index_zero_division():
+    """Issue: https://github.com/Lightning-AI/torchmetrics/issues/2658."""
+    # Test case where all pixels are background (zeros)
+    confmat = torch.tensor([[4, 0], [0, 0]])
+
+    # Test with zero_division=0.0
+    result = _jaccard_index_reduce(confmat, average="binary", zero_division=0.0)
+    assert result == 0.0, f"Expected 0.0, but got {result}"
+
+    # Test with zero_division=1.0
+    result = _jaccard_index_reduce(confmat, average="binary", zero_division=1.0)
+    assert result == 1.0, f"Expected 1.0, but got {result}"
+
+    # Test case with some foreground pixels
+    confmat = torch.tensor([[2, 1], [1, 1]])
+    result = _jaccard_index_reduce(confmat, average="binary", zero_division=0.0)
+    expected = 1 / 3
+    assert torch.isclose(result, torch.tensor(expected)), f"Expected {expected}, but got {result}"
+
+
 @pytest.mark.parametrize(
     ("metric", "kwargs"),
     [
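
To illustrate why the binary branch needed this change: when a sample contains no positives at all (tp = fp = fn = 0), the Jaccard denominator `fp + fn + tp` is zero and the old expression produced NaN. The sketch below emulates the idea behind torchmetrics' internal `_safe_divide` with a hypothetical stand-in helper (`safe_divide` here is not the library's actual implementation), so the degenerate case from issue #2658 can be checked without importing the private function:

```python
# Minimal sketch of zero-safe division, assuming semantics like
# torchmetrics' internal `_safe_divide(num, denom, zero_division=...)`.
import torch


def safe_divide(num: torch.Tensor, denom: torch.Tensor, zero_division: float = 0.0) -> torch.Tensor:
    """Return num / denom, substituting `zero_division` wherever denom == 0."""
    zero_mask = denom == 0
    # Divide by 1 at masked positions to avoid producing NaN from 0 / 0.
    result = num / torch.where(zero_mask, torch.ones_like(denom), denom)
    return torch.where(zero_mask, torch.full_like(result, zero_division), result)


# All-background confusion matrix: only true negatives, so tp = fp = fn = 0.
confmat = torch.tensor([[4.0, 0.0], [0.0, 0.0]])
tp, fp, fn = confmat[1, 1], confmat[0, 1], confmat[1, 0]

print(safe_divide(tp, fp + fn + tp, zero_division=0.0))  # tensor(0.)
print(safe_divide(tp, fp + fn + tp, zero_division=1.0))  # tensor(1.)
# Before the patch, `tp / (fp + fn + tp)` evaluated to tensor(nan) here.
```

Making the fallback value configurable via `zero_division` mirrors the convention scikit-learn uses for its classification metrics: 0.0 treats an empty prediction/target pair as a failure, while 1.0 treats it as a perfect match.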