Documentation for single operator E2E tests #558
582 tests run, 273 passed, 300 skipped, 9 failed.
Annotations
Check failure on line 930 in forge/test/mlir/test_ops.py
github-actions / TT-Forge-FE Tests
test_ops.test_reduce_sum[-2-32]
assert False
+ where False = compare_with_golden_pcc(golden=tensor([[[12.9838, 14.8508, 15.1358, 14.8151, 14.3932, 21.0588, 17.5135,\n 17.5356, 17.2288, 13.5338, 16.9649,... 13.8547, 18.8564, 14.0658, 18.0882, 14.2574, 15.2911, 18.4941,\n 14.9824, 16.5938, 12.7449, 18.8444]]]), calculated=tensor([[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0000, 0.0000, 0.0000, 0.0000,... 13.8125, 18.7500, 14.1250, 18.0000, 14.2500, 15.3125, 18.5000,\n 14.9375, 16.5000, 12.6875, 18.7500]]]), pcc=0.99)
Raw output
input_shape = (1, 32, 32), dim = -2
@pytest.mark.parametrize("input_shape", [(1, 32, 32), (1, 64, 64), (1, 128, 128, 128)], ids=["32", "64", "128"])
@pytest.mark.parametrize("dim", [-1, -2], ids=["-1", "-2"])
@pytest.mark.push
def test_reduce_sum(input_shape, dim):
class ReduceSum(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a):
# reduce is supported on tt-metal only with keepdim=True
return torch.sum(a, dim=dim, keepdim=True)
inputs = [torch.rand(input_shape)]
framework_model = ReduceSum()
fw_out = framework_model(*inputs)
compiled_model = forge.compile(framework_model, sample_inputs=inputs)
co_out = compiled_model(*inputs)
co_out = [co.to("cpu") for co in co_out]
> assert compare_with_golden_pcc(golden=fw_out, calculated=co_out[0], pcc=0.99)
E assert False
E + where False = compare_with_golden_pcc(golden=tensor([[[12.9838, 14.8508, 15.1358, 14.8151, 14.3932, 21.0588, 17.5135,\n 17.5356, 17.2288, 13.5338, 16.9649,... 13.8547, 18.8564, 14.0658, 18.0882, 14.2574, 15.2911, 18.4941,\n 14.9824, 16.5938, 12.7449, 18.8444]]]), calculated=tensor([[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0000, 0.0000, 0.0000, 0.0000,... 13.8125, 18.7500, 14.1250, 18.0000, 14.2500, 15.3125, 18.5000,\n 14.9375, 16.5000, 12.6875, 18.7500]]]), pcc=0.99)
forge/test/mlir/test_ops.py:930: AssertionError
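
All nine failures are PCC mismatches: the device output comes back with a run of exact zeros at the start of the tensor, which drags its correlation with the golden PyTorch result below the 0.99 threshold. As a rough reference, a Pearson-correlation check of this kind typically boils down to the sketch below; this is a hypothetical illustration, not the actual compare_with_golden_pcc from forge's test utilities, whose implementation may differ.

    import torch

    def pcc(golden: torch.Tensor, calculated: torch.Tensor) -> float:
        # Pearson correlation coefficient over the flattened tensors.
        g = golden.flatten().double()
        c = calculated.flatten().double()
        g = g - g.mean()
        c = c - c.mean()
        denom = g.norm() * c.norm()
        return 0.0 if denom == 0 else float((g @ c) / denom)

    # A result passes when pcc(golden, calculated) >= 0.99; the zeroed leading
    # entries in the device outputs above are what pull the correlation under
    # that threshold even though the tail values agree.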
Check failure on line 975 in forge/test/mlir/test_ops.py
github-actions / TT-Forge-FE Tests
test_ops.test_reduce_mean[-2-input_shape1]
assert False
+ where False = compare_with_golden_pcc(golden=tensor([[[0.3734, 0.5252, 0.2296, 0.3614, 0.4566, 0.7025, 0.4621, 0.5936,\n 0.4810, 0.4749, 0.4758, 0.4074, 0....5, 0.4463, 0.5515, 0.3774, 0.4942, 0.4500,\n 0.5380, 0.4869, 0.5601, 0.5192, 0.5016, 0.5524, 0.4290, 0.5839]]]), calculated=tensor([[[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0000, 0.0000, 0.0000, 0.0000, 0....8, 0.4434, 0.5469, 0.3750, 0.4922, 0.4473,\n 0.5352, 0.4824, 0.5586, 0.5156, 0.4980, 0.5469, 0.4258, 0.5781]]]), pcc=0.99)
Raw output
input_shape = (1, 12, 32), dim = -2
@pytest.mark.parametrize(
    "input_shape",
    [
        (1, 32, 12),
        (1, 12, 32),
        (1, 12, 3200),
        (1, 32, 32),
        (1, 64, 64),
        (1, 128, 128, 128),
    ],
)
@pytest.mark.parametrize(
    "dim",
    [
        -1,
        -2,
    ],
)
@pytest.mark.push
def test_reduce_mean(input_shape, dim):
    if input_shape == (1, 12, 3200) and dim == -1:
        # Tensor mismatch(PCC: 0.72) - https://github.com/tenstorrent/tt-mlir/issues/869
        pytest.xfail("Tensor mismatch between PyTorch and TTNN (PCC: 0.72)")

    class ReduceMean(nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, a):
            # reduce is supported on tt-metal only with keepdim=True
            return torch.mean(a, dim=dim, keepdim=True)

    inputs = [torch.rand(input_shape)]

    framework_model = ReduceMean()
    fw_out = framework_model(*inputs)

    compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    co_out = compiled_model(*inputs)

    co_out = [co.to("cpu") for co in co_out]

>   assert compare_with_golden_pcc(golden=fw_out, calculated=co_out[0], pcc=0.99)
E assert False
E + where False = compare_with_golden_pcc(golden=tensor([[[0.3734, 0.5252, 0.2296, 0.3614, 0.4566, 0.7025, 0.4621, 0.5936,\n 0.4810, 0.4749, 0.4758, 0.4074, 0....5, 0.4463, 0.5515, 0.3774, 0.4942, 0.4500,\n 0.5380, 0.4869, 0.5601, 0.5192, 0.5016, 0.5524, 0.4290, 0.5839]]]), calculated=tensor([[[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0000, 0.0000, 0.0000, 0.0000, 0....8, 0.4434, 0.5469, 0.3750, 0.4922, 0.4473,\n 0.5352, 0.4824, 0.5586, 0.5156, 0.4980, 0.5469, 0.4258, 0.5781]]]), pcc=0.99)
forge/test/mlir/test_ops.py:975: AssertionError
Check failure on line 975 in forge/test/mlir/test_ops.py
github-actions / TT-Forge-FE Tests
test_ops.test_reduce_mean[-2-input_shape2]
assert False
+ where False = compare_with_golden_pcc(golden=tensor([[[0.4932, 0.5093, 0.4791, ..., 0.3953, 0.4038, 0.3925]]]), calculated=tensor([[[0.0000, 0.0000, 0.0000, ..., 0.3926, 0.4004, 0.3906]]]), pcc=0.99)
Raw output
input_shape = (1, 12, 3200), dim = -2
@pytest.mark.parametrize(
    "input_shape",
    [
        (1, 32, 12),
        (1, 12, 32),
        (1, 12, 3200),
        (1, 32, 32),
        (1, 64, 64),
        (1, 128, 128, 128),
    ],
)
@pytest.mark.parametrize(
    "dim",
    [
        -1,
        -2,
    ],
)
@pytest.mark.push
def test_reduce_mean(input_shape, dim):
    if input_shape == (1, 12, 3200) and dim == -1:
        # Tensor mismatch(PCC: 0.72) - https://github.com/tenstorrent/tt-mlir/issues/869
        pytest.xfail("Tensor mismatch between PyTorch and TTNN (PCC: 0.72)")

    class ReduceMean(nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, a):
            # reduce is supported on tt-metal only with keepdim=True
            return torch.mean(a, dim=dim, keepdim=True)

    inputs = [torch.rand(input_shape)]

    framework_model = ReduceMean()
    fw_out = framework_model(*inputs)

    compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    co_out = compiled_model(*inputs)

    co_out = [co.to("cpu") for co in co_out]

>   assert compare_with_golden_pcc(golden=fw_out, calculated=co_out[0], pcc=0.99)
E assert False
E + where False = compare_with_golden_pcc(golden=tensor([[[0.4932, 0.5093, 0.4791, ..., 0.3953, 0.4038, 0.3925]]]), calculated=tensor([[[0.0000, 0.0000, 0.0000, ..., 0.3926, 0.4004, 0.3906]]]), pcc=0.99)
forge/test/mlir/test_ops.py:975: AssertionError
Check failure on line 975 in forge/test/mlir/test_ops.py
github-actions / TT-Forge-FE Tests
test_ops.test_reduce_mean[-2-input_shape3]
assert False
+ where False = compare_with_golden_pcc(golden=tensor([[[0.4057, 0.4641, 0.4730, 0.4630, 0.4498, 0.6581, 0.5473, 0.5480,\n 0.5384, 0.4229, 0.5302, 0.3955, 0....8, 0.5294, 0.4579, 0.4330, 0.5893, 0.4396,\n 0.5653, 0.4455, 0.4778, 0.5779, 0.4682, 0.5186, 0.3983, 0.5889]]]), calculated=tensor([[[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0000, 0.0000, 0.0000, 0.0000, 0....2, 0.5273, 0.4570, 0.4316, 0.5859, 0.4414,\n 0.5625, 0.4453, 0.4785, 0.5781, 0.4668, 0.5156, 0.3965, 0.5859]]]), pcc=0.99)
Raw output
input_shape = (1, 32, 32), dim = -2
@pytest.mark.parametrize(
    "input_shape",
    [
        (1, 32, 12),
        (1, 12, 32),
        (1, 12, 3200),
        (1, 32, 32),
        (1, 64, 64),
        (1, 128, 128, 128),
    ],
)
@pytest.mark.parametrize(
    "dim",
    [
        -1,
        -2,
    ],
)
@pytest.mark.push
def test_reduce_mean(input_shape, dim):
    if input_shape == (1, 12, 3200) and dim == -1:
        # Tensor mismatch(PCC: 0.72) - https://github.com/tenstorrent/tt-mlir/issues/869
        pytest.xfail("Tensor mismatch between PyTorch and TTNN (PCC: 0.72)")

    class ReduceMean(nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, a):
            # reduce is supported on tt-metal only with keepdim=True
            return torch.mean(a, dim=dim, keepdim=True)

    inputs = [torch.rand(input_shape)]

    framework_model = ReduceMean()
    fw_out = framework_model(*inputs)

    compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    co_out = compiled_model(*inputs)

    co_out = [co.to("cpu") for co in co_out]

>   assert compare_with_golden_pcc(golden=fw_out, calculated=co_out[0], pcc=0.99)
E assert False
E + where False = compare_with_golden_pcc(golden=tensor([[[0.4057, 0.4641, 0.4730, 0.4630, 0.4498, 0.6581, 0.5473, 0.5480,\n 0.5384, 0.4229, 0.5302, 0.3955, 0....8, 0.5294, 0.4579, 0.4330, 0.5893, 0.4396,\n 0.5653, 0.4455, 0.4778, 0.5779, 0.4682, 0.5186, 0.3983, 0.5889]]]), calculated=tensor([[[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0000, 0.0000, 0.0000, 0.0000, 0....2, 0.5273, 0.4570, 0.4316, 0.5859, 0.4414,\n 0.5625, 0.4453, 0.4785, 0.5781, 0.4668, 0.5156, 0.3965, 0.5859]]]), pcc=0.99)
forge/test/mlir/test_ops.py:975: AssertionError
Check failure on line 975 in forge/test/mlir/test_ops.py
github-actions / TT-Forge-FE Tests
test_ops.test_reduce_mean[-2-input_shape5]
assert False
+ where False = compare_with_golden_pcc(golden=tensor([[[[0.4590, 0.4941, 0.5337, ..., 0.5295, 0.5333, 0.5018]],\n\n [[0.5215, 0.5193, 0.5045, ..., 0.4914, 0..., 0.4723, 0.5327, ..., 0.5562, 0.5554, 0.5610]],\n\n [[0.5040, 0.5062, 0.4709, ..., 0.5116, 0.4936, 0.5074]]]]), calculated=tensor([[[[0.0000, 0.0000, 0.0000, ..., 0.5312, 0.5312, 0.5000]],\n\n [[0.0000, 0.0000, 0.0000, ..., 0.4922, 0..., 0.4746, 0.5312, ..., 0.5586, 0.5586, 0.5625]],\n\n [[0.5039, 0.5039, 0.4727, ..., 0.5117, 0.4941, 0.5078]]]]), pcc=0.99)
Raw output
input_shape = (1, 128, 128, 128), dim = -2
@pytest.mark.parametrize(
    "input_shape",
    [
        (1, 32, 12),
        (1, 12, 32),
        (1, 12, 3200),
        (1, 32, 32),
        (1, 64, 64),
        (1, 128, 128, 128),
    ],
)
@pytest.mark.parametrize(
    "dim",
    [
        -1,
        -2,
    ],
)
@pytest.mark.push
def test_reduce_mean(input_shape, dim):
    if input_shape == (1, 12, 3200) and dim == -1:
        # Tensor mismatch(PCC: 0.72) - https://github.com/tenstorrent/tt-mlir/issues/869
        pytest.xfail("Tensor mismatch between PyTorch and TTNN (PCC: 0.72)")

    class ReduceMean(nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, a):
            # reduce is supported on tt-metal only with keepdim=True
            return torch.mean(a, dim=dim, keepdim=True)

    inputs = [torch.rand(input_shape)]

    framework_model = ReduceMean()
    fw_out = framework_model(*inputs)

    compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    co_out = compiled_model(*inputs)

    co_out = [co.to("cpu") for co in co_out]

>   assert compare_with_golden_pcc(golden=fw_out, calculated=co_out[0], pcc=0.99)
E assert False
E + where False = compare_with_golden_pcc(golden=tensor([[[[0.4590, 0.4941, 0.5337, ..., 0.5295, 0.5333, 0.5018]],\n\n [[0.5215, 0.5193, 0.5045, ..., 0.4914, 0..., 0.4723, 0.5327, ..., 0.5562, 0.5554, 0.5610]],\n\n [[0.5040, 0.5062, 0.4709, ..., 0.5116, 0.4936, 0.5074]]]]), calculated=tensor([[[[0.0000, 0.0000, 0.0000, ..., 0.5312, 0.5312, 0.5000]],\n\n [[0.0000, 0.0000, 0.0000, ..., 0.4922, 0..., 0.4746, 0.5312, ..., 0.5586, 0.5586, 0.5625]],\n\n [[0.5039, 0.5039, 0.4727, ..., 0.5117, 0.4941, 0.5078]]]]), pcc=0.99)
forge/test/mlir/test_ops.py:975: AssertionError
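
In all four reduce_mean failures (and the reduce_sum failure above) the calculated tensor starts with a block of exact zeros while the remaining values match the golden output to roughly bfloat16 precision. A small helper along these lines can quantify that pattern when triaging; it is a hypothetical debugging aid, not something that exists in the repository.

    import torch

    def leading_zero_count(calculated: torch.Tensor) -> int:
        # Number of exactly-zero entries before the first non-zero value,
        # scanning the tensor in row-major order.
        flat = calculated.flatten()
        nonzero = torch.nonzero(flat).flatten()
        return int(nonzero[0]) if nonzero.numel() > 0 else flat.numel()

    # Example: leading_zero_count(co_out[0]) on the (1, 12, 3200) case above
    # would report how far into the row-major layout the zeroed region extends.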
Check failure on line 1305 in forge/test/mlir/test_ops.py
github-actions / TT-Forge-FE Tests
test_ops.test_reduce_max[1-input_shape2]
assert False
+ where False = compare_with_golden_pcc(golden=tensor([[[0.9991, 0.9998, 0.9553, 0.9867, 0.9970, 0.9913, 0.9886, 0.9793,\n 0.9980, 0.9652, 0.9992, 0.9523, 0....5, 0.9490, 0.9918, 0.9918, 0.9600, 0.8830,\n 0.9983, 0.9372, 0.9667, 0.9933, 0.9952, 0.9849, 0.9893, 0.9789]]]), calculated=tensor([[[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0000, 0.0000, 0.0000, 0.0000, 0....2, 0.9453, 0.9883, 0.9883, 0.9570, 0.8828,\n 0.9961, 0.9336, 0.9648, 0.9922, 0.9922, 0.9844, 0.9883, 0.9766]]]), pcc=0.99)
Raw output
input_shape = (2, 32, 64), dim = 1
@pytest.mark.parametrize(
    "input_shape",
    [
        (2, 32, 64, 64),
        (3, 22, 37, 41),
        (2, 32, 64),
        (3, 22, 37),
    ],
)
@pytest.mark.parametrize(
    "dim",
    [
        0,
        1,
        2,
        3,
        -1,
        -2,
        -3,
        -4,
    ],
)
@pytest.mark.push
def test_reduce_max(input_shape, dim):
    reduce_max_dim = dim
    if reduce_max_dim < 0:
        reduce_max_dim = reduce_max_dim + len(input_shape)
    if (reduce_max_dim < 0) or (reduce_max_dim >= len(input_shape)):
        pytest.skip()

    if (input_shape in [(2, 32, 64, 64), (3, 22, 37, 41)] and dim in [0, -4, 1, -3]) or (
        input_shape in [(2, 32, 64), (3, 22, 37)] and dim in [0, -3]
    ):
        pytest.xfail("TTNN Issue: Unsupported dim")

    # TTNN Max issues:
    #   Unsupported dim - https://github.com/tenstorrent/tt-metal/issues/13186
    #   Shape mismatch along the H and W dimension - https://github.com/tenstorrent/tt-metal/issues/13189
    #   Tensor rank is not 4 - https://github.com/tenstorrent/tt-metal/issues/13190
    class ReduceMax(nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, a):
            return torch.max(a, dim=dim, keepdim=True)[0]

    inputs = [torch.rand(input_shape)]

    framework_model = ReduceMax()
    framework_model.eval()
    fw_out = framework_model(*inputs)

    compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    co_out = compiled_model(*inputs)

    co_out = [co.to("cpu") for co in co_out]

>   assert compare_with_golden_pcc(golden=fw_out, calculated=co_out[0], pcc=0.99)
E assert False
E + where False = compare_with_golden_pcc(golden=tensor([[[0.9991, 0.9998, 0.9553, 0.9867, 0.9970, 0.9913, 0.9886, 0.9793,\n 0.9980, 0.9652, 0.9992, 0.9523, 0....5, 0.9490, 0.9918, 0.9918, 0.9600, 0.8830,\n 0.9983, 0.9372, 0.9667, 0.9933, 0.9952, 0.9849, 0.9893, 0.9789]]]), calculated=tensor([[[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0000, 0.0000, 0.0000, 0.0000, 0....2, 0.9453, 0.9883, 0.9883, 0.9570, 0.8828,\n 0.9961, 0.9336, 0.9648, 0.9922, 0.9922, 0.9844, 0.9883, 0.9766]]]), pcc=0.99)
forge/test/mlir/test_ops.py:1305: AssertionError
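
For test_reduce_max, the skip/xfail guards shown above decide which (input_shape, dim) pairs ever reach the PCC assert. Replaying that gating logic outside pytest (the sketch below simply mirrors the conditions in the test body) shows that the 4D shapes still run for dim 2, 3, -1, -2 and the 3D shapes for dim 1, 2, -1, -2; the four failing reduce_max cases in this report all fall inside that set.

    # Mirrors the skip/xfail gating in test_reduce_max above and lists the
    # parametrizations that actually reach the PCC assert.
    shapes = [(2, 32, 64, 64), (3, 22, 37, 41), (2, 32, 64), (3, 22, 37)]
    dims = [0, 1, 2, 3, -1, -2, -3, -4]

    for shape in shapes:
        for dim in dims:
            d = dim + len(shape) if dim < 0 else dim
            if d < 0 or d >= len(shape):
                continue  # pytest.skip() in the test
            if (shape in [(2, 32, 64, 64), (3, 22, 37, 41)] and dim in [0, -4, 1, -3]) or (
                shape in [(2, 32, 64), (3, 22, 37)] and dim in [0, -3]
            ):
                continue  # pytest.xfail("TTNN Issue: Unsupported dim")
            print(shape, dim)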
Check failure on line 1305 in forge/test/mlir/test_ops.py
github-actions / TT-Forge-FE Tests
test_ops.test_reduce_max[1-input_shape3]
assert False
+ where False = compare_with_golden_pcc(golden=tensor([[[0.9351, 0.9992, 0.9537, 0.9025, 0.9998, 0.9630, 0.9737, 0.9514,\n 0.9970, 0.9124, 0.9715, 0.9883, 0.... 0.9522, 0.8956, 0.9840, 0.9490, 0.9959, 0.9850, 0.9866, 0.9828,\n 0.9832, 0.9787, 0.8050, 0.9891, 0.9601]]]), calculated=tensor([[[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0000, 0.0000, 0.0000, 0.0000, 0.... 0.9492, 0.8945, 0.9805, 0.9453, 0.9922, 0.9844, 0.9844, 0.9805,\n 0.9805, 0.9766, 0.8047, 0.9883, 0.9570]]]), pcc=0.99)
Raw output
input_shape = (3, 22, 37), dim = 1
@pytest.mark.parametrize(
    "input_shape",
    [
        (2, 32, 64, 64),
        (3, 22, 37, 41),
        (2, 32, 64),
        (3, 22, 37),
    ],
)
@pytest.mark.parametrize(
    "dim",
    [
        0,
        1,
        2,
        3,
        -1,
        -2,
        -3,
        -4,
    ],
)
@pytest.mark.push
def test_reduce_max(input_shape, dim):
    reduce_max_dim = dim
    if reduce_max_dim < 0:
        reduce_max_dim = reduce_max_dim + len(input_shape)
    if (reduce_max_dim < 0) or (reduce_max_dim >= len(input_shape)):
        pytest.skip()

    if (input_shape in [(2, 32, 64, 64), (3, 22, 37, 41)] and dim in [0, -4, 1, -3]) or (
        input_shape in [(2, 32, 64), (3, 22, 37)] and dim in [0, -3]
    ):
        pytest.xfail("TTNN Issue: Unsupported dim")

    # TTNN Max issues:
    #   Unsupported dim - https://github.com/tenstorrent/tt-metal/issues/13186
    #   Shape mismatch along the H and W dimension - https://github.com/tenstorrent/tt-metal/issues/13189
    #   Tensor rank is not 4 - https://github.com/tenstorrent/tt-metal/issues/13190
    class ReduceMax(nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, a):
            return torch.max(a, dim=dim, keepdim=True)[0]

    inputs = [torch.rand(input_shape)]

    framework_model = ReduceMax()
    framework_model.eval()
    fw_out = framework_model(*inputs)

    compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    co_out = compiled_model(*inputs)

    co_out = [co.to("cpu") for co in co_out]

>   assert compare_with_golden_pcc(golden=fw_out, calculated=co_out[0], pcc=0.99)
E assert False
E + where False = compare_with_golden_pcc(golden=tensor([[[0.9351, 0.9992, 0.9537, 0.9025, 0.9998, 0.9630, 0.9737, 0.9514,\n 0.9970, 0.9124, 0.9715, 0.9883, 0.... 0.9522, 0.8956, 0.9840, 0.9490, 0.9959, 0.9850, 0.9866, 0.9828,\n 0.9832, 0.9787, 0.8050, 0.9891, 0.9601]]]), calculated=tensor([[[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0000, 0.0000, 0.0000, 0.0000, 0.... 0.9492, 0.8945, 0.9805, 0.9453, 0.9922, 0.9844, 0.9844, 0.9805,\n 0.9805, 0.9766, 0.8047, 0.9883, 0.9570]]]), pcc=0.99)
forge/test/mlir/test_ops.py:1305: AssertionError
Check failure on line 1305 in forge/test/mlir/test_ops.py
github-actions / TT-Forge-FE Tests
test_ops.test_reduce_max[-2-input_shape1]
assert False
+ where False = compare_with_golden_pcc(golden=tensor([[[[0.9952, 0.9970, 0.9523, ..., 0.9991, 0.9883, 0.9992]],\n\n [[0.9853, 0.9439, 0.9525, ..., 0.9142, 0..., 0.9987, 0.8916, ..., 0.9881, 0.9813, 0.9702]],\n\n [[0.9821, 0.9527, 0.9492, ..., 0.9769, 0.9855, 0.9630]]]]), calculated=tensor([[[[0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000]],\n\n [[0.0000, 0.0000, 0.0000, ..., 0.9141, 0..., 0.9961, 0.8906, ..., 0.9844, 0.9805, 0.9688]],\n\n [[0.9805, 0.9492, 0.9492, ..., 0.9766, 0.9844, 0.9609]]]]), pcc=0.99)
Raw output
input_shape = (3, 22, 37, 41), dim = -2
@pytest.mark.parametrize(
    "input_shape",
    [
        (2, 32, 64, 64),
        (3, 22, 37, 41),
        (2, 32, 64),
        (3, 22, 37),
    ],
)
@pytest.mark.parametrize(
    "dim",
    [
        0,
        1,
        2,
        3,
        -1,
        -2,
        -3,
        -4,
    ],
)
@pytest.mark.push
def test_reduce_max(input_shape, dim):
    reduce_max_dim = dim
    if reduce_max_dim < 0:
        reduce_max_dim = reduce_max_dim + len(input_shape)
    if (reduce_max_dim < 0) or (reduce_max_dim >= len(input_shape)):
        pytest.skip()

    if (input_shape in [(2, 32, 64, 64), (3, 22, 37, 41)] and dim in [0, -4, 1, -3]) or (
        input_shape in [(2, 32, 64), (3, 22, 37)] and dim in [0, -3]
    ):
        pytest.xfail("TTNN Issue: Unsupported dim")

    # TTNN Max issues:
    #   Unsupported dim - https://github.com/tenstorrent/tt-metal/issues/13186
    #   Shape mismatch along the H and W dimension - https://github.com/tenstorrent/tt-metal/issues/13189
    #   Tensor rank is not 4 - https://github.com/tenstorrent/tt-metal/issues/13190
    class ReduceMax(nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, a):
            return torch.max(a, dim=dim, keepdim=True)[0]

    inputs = [torch.rand(input_shape)]

    framework_model = ReduceMax()
    framework_model.eval()
    fw_out = framework_model(*inputs)

    compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    co_out = compiled_model(*inputs)

    co_out = [co.to("cpu") for co in co_out]

>   assert compare_with_golden_pcc(golden=fw_out, calculated=co_out[0], pcc=0.99)
E assert False
E + where False = compare_with_golden_pcc(golden=tensor([[[[0.9952, 0.9970, 0.9523, ..., 0.9991, 0.9883, 0.9992]],\n\n [[0.9853, 0.9439, 0.9525, ..., 0.9142, 0..., 0.9987, 0.8916, ..., 0.9881, 0.9813, 0.9702]],\n\n [[0.9821, 0.9527, 0.9492, ..., 0.9769, 0.9855, 0.9630]]]]), calculated=tensor([[[[0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000]],\n\n [[0.0000, 0.0000, 0.0000, ..., 0.9141, 0..., 0.9961, 0.8906, ..., 0.9844, 0.9805, 0.9688]],\n\n [[0.9805, 0.9492, 0.9492, ..., 0.9766, 0.9844, 0.9609]]]]), pcc=0.99)
forge/test/mlir/test_ops.py:1305: AssertionError
Check failure on line 1305 in forge/test/mlir/test_ops.py
github-actions / TT-Forge-FE Tests
test_ops.test_reduce_max[-2-input_shape3]
assert False
+ where False = compare_with_golden_pcc(golden=tensor([[[0.9351, 0.9992, 0.9537, 0.9025, 0.9998, 0.9630, 0.9737, 0.9514,\n 0.9970, 0.9124, 0.9715, 0.9883, 0.... 0.9522, 0.8956, 0.9840, 0.9490, 0.9959, 0.9850, 0.9866, 0.9828,\n 0.9832, 0.9787, 0.8050, 0.9891, 0.9601]]]), calculated=tensor([[[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0000, 0.0000, 0.0000, 0.0000, 0.... 0.9492, 0.8945, 0.9805, 0.9453, 0.9922, 0.9844, 0.9844, 0.9805,\n 0.9805, 0.9766, 0.8047, 0.9883, 0.9570]]]), pcc=0.99)
Raw output
input_shape = (3, 22, 37), dim = -2
@pytest.mark.parametrize(
    "input_shape",
    [
        (2, 32, 64, 64),
        (3, 22, 37, 41),
        (2, 32, 64),
        (3, 22, 37),
    ],
)
@pytest.mark.parametrize(
    "dim",
    [
        0,
        1,
        2,
        3,
        -1,
        -2,
        -3,
        -4,
    ],
)
@pytest.mark.push
def test_reduce_max(input_shape, dim):
    reduce_max_dim = dim
    if reduce_max_dim < 0:
        reduce_max_dim = reduce_max_dim + len(input_shape)
    if (reduce_max_dim < 0) or (reduce_max_dim >= len(input_shape)):
        pytest.skip()

    if (input_shape in [(2, 32, 64, 64), (3, 22, 37, 41)] and dim in [0, -4, 1, -3]) or (
        input_shape in [(2, 32, 64), (3, 22, 37)] and dim in [0, -3]
    ):
        pytest.xfail("TTNN Issue: Unsupported dim")

    # TTNN Max issues:
    #   Unsupported dim - https://github.com/tenstorrent/tt-metal/issues/13186
    #   Shape mismatch along the H and W dimension - https://github.com/tenstorrent/tt-metal/issues/13189
    #   Tensor rank is not 4 - https://github.com/tenstorrent/tt-metal/issues/13190
    class ReduceMax(nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, a):
            return torch.max(a, dim=dim, keepdim=True)[0]

    inputs = [torch.rand(input_shape)]

    framework_model = ReduceMax()
    framework_model.eval()
    fw_out = framework_model(*inputs)

    compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    co_out = compiled_model(*inputs)

    co_out = [co.to("cpu") for co in co_out]

>   assert compare_with_golden_pcc(golden=fw_out, calculated=co_out[0], pcc=0.99)
E assert False
E + where False = compare_with_golden_pcc(golden=tensor([[[0.9351, 0.9992, 0.9537, 0.9025, 0.9998, 0.9630, 0.9737, 0.9514,\n 0.9970, 0.9124, 0.9715, 0.9883, 0.... 0.9522, 0.8956, 0.9840, 0.9490, 0.9959, 0.9850, 0.9866, 0.9828,\n 0.9832, 0.9787, 0.8050, 0.9891, 0.9601]]]), calculated=tensor([[[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0000, 0.0000, 0.0000, 0.0000, 0.... 0.9492, 0.8945, 0.9805, 0.9453, 0.9922, 0.9844, 0.9844, 0.9805,\n 0.9805, 0.9766, 0.8047, 0.9883, 0.9570]]]), pcc=0.99)
forge/test/mlir/test_ops.py:1305: AssertionError
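
For hands-on triage, the failing configurations can be reproduced outside pytest with a standalone script assembled from the test code above. The sketch below picks the (3, 22, 37), dim=-2 reduce_max case; the forge.compile call and the module definition are copied from the test, while the final max-abs-diff print is a stand-in for the compare_with_golden_pcc check (whose import path is not shown in the tracebacks, so it is left out here).

    import torch
    import torch.nn as nn
    import forge

    class ReduceMax(nn.Module):
        def forward(self, a):
            # keepdim=True, matching the reduce support noted in the tests
            return torch.max(a, dim=-2, keepdim=True)[0]

    inputs = [torch.rand(3, 22, 37)]
    model = ReduceMax()
    golden = model(*inputs)

    compiled = forge.compile(model, sample_inputs=inputs)
    device_out = compiled(*inputs)[0].to("cpu").to(golden.dtype)

    # The failing runs show a zeroed prefix in the device output, so the max
    # absolute difference should be large when the bug reproduces.
    print("max abs diff:", (golden - device_out).abs().max().item())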