diff --git a/ttnn/cpp/ttnn/operations/eltwise/binary/binary_pybind.hpp b/ttnn/cpp/ttnn/operations/eltwise/binary/binary_pybind.hpp index 8c6bb9291a6..919b624b39b 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/binary/binary_pybind.hpp +++ b/ttnn/cpp/ttnn/operations/eltwise/binary/binary_pybind.hpp @@ -626,13 +626,13 @@ void bind_inplace_operation(py::module& module, const binary_operation_t& operat } template -void bind_logical_inplace_operation(py::module& module, const binary_operation_t& operation, const std::string& description, const std::string& note=" ") { +void bind_logical_inplace_operation(py::module& module, const binary_operation_t& operation, const std::string& description, const std::string& math, const std::string& note=" ") { auto doc = fmt::format( R"doc( {2} .. math:: - \mathrm{{output\_tensor}} = \verb|{0}|(\mathrm{{input\_tensor\_a,input\_tensor\_b}}). + {3} Args: input_tensor_a (ttnn.Tensor): the input tensor. @@ -642,7 +642,7 @@ void bind_logical_inplace_operation(py::module& module, const binary_operation_t List of ttnn.Tensor: the output tensor. 
Note: - {3} + {4} Example: >>> tensor1 = ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16), device=device) @@ -652,6 +652,7 @@ void bind_logical_inplace_operation(py::module& module, const binary_operation_t operation.base_name(), operation.python_fully_qualified_name(), description, + math, note); bind_registered_operation( @@ -980,9 +981,7 @@ void py_module(py::module& module) { module, ttnn::logical_xor, R"doc(Compute logical_xor :attr:`input_tensor_a` and :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`)doc", - R"doc(\mathrm{output\_tensor}_i = (\mathrm{input\_tensor\_a}_i \land \lnot \mathrm{input\_tensor\_b}_i) \lor (\lnot \mathrm{input\_tensor\_a}_i \land \mathrm{input\_tensor\_b}_i) - )doc", - + R"doc(\mathrm{output\_tensor}_i = (\mathrm{input\_tensor\_a}_i \land \lnot \mathrm{input\_tensor\_b}_i) \lor (\lnot \mathrm{input\_tensor\_a}_i \land \mathrm{input\_tensor\_b}_i))doc",".", R"doc(Supported dtypes, layouts, and ranks: +----------------------------+---------------------------------+-------------------+ @@ -996,6 +995,7 @@ void py_module(py::module& module) { module, ttnn::logical_or_, R"doc(Compute inplace logical OR of :attr:`input_tensor_a` and :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`)doc", + R"doc((\mathrm{input\_tensor\_a}_i | \mathrm{input\_tensor\_b}_i))doc", R"doc(Supported dtypes, layouts, and ranks: @@ -1010,6 +1010,7 @@ void py_module(py::module& module) { module, ttnn::logical_xor_, R"doc(Compute inplace logical XOR of :attr:`input_tensor_a` and :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`)doc", + R"doc((\mathrm{input\_tensor\_a}_i \land \lnot \mathrm{input\_tensor\_b}_i) \lor (\lnot \mathrm{input\_tensor\_a}_i \land \mathrm{input\_tensor\_b}_i))doc", R"doc(Supported dtypes, layouts, and ranks: @@ -1024,6 +1025,7 @@ void py_module(py::module& module) { module, ttnn::logical_and_,
R"doc(Compute inplace logical AND of :attr:`input_tensor_a` and :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`)doc", + R"doc((\mathrm{input\_tensor\_a}_i \& \mathrm{input\_tensor\_b}_i))doc", R"doc(Supported dtypes, layouts, and ranks: diff --git a/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_composite_op.cpp b/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_composite_op.cpp index c0a516b34cb..ef74e088e1a 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_composite_op.cpp +++ b/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_composite_op.cpp @@ -151,13 +151,6 @@ Tensor _atan2(const Tensor& input_a, const Tensor& input_b, const std::optional< return res; } -// Tensor _logical_xor(const Tensor& input_a, const Tensor& input_b, const std::optional& output_mem_config) { -// Tensor in_a_eq_zero = ttnn::eqz(input_a, output_mem_config); -// Tensor in_b_eq_zero = ttnn::eqz(input_b, output_mem_config); -// Tensor in_b_neq_zero = ttnn::nez(input_b, output_mem_config); -// Tensor result = ttnn::where(in_a_eq_zero, in_b_neq_zero, in_b_eq_zero); -// return result; -//} Tensor ExecuteDiv::invoke(uint8_t queue_id, const Tensor& input, float value, bool accurate_mode, const std::string& round_mode, const std::optional& output_mem_config, std::optional output_tensor) { TT_FATAL((round_mode == "None" || round_mode == "trunc" || round_mode == "floor"), "Incorrect rounding mode (expected 'None', 'trunc', or 'floor')"); @@ -324,13 +317,6 @@ Tensor _floor_div(const Tensor& input_a, const Tensor& input_b, const std::optio result); } -// Tensor _logical_xor_(const Tensor& input_a, const Tensor& input_b, const std::optional& output_mem_config) { -// Tensor in_a_eq_zero = ttnn::eqz(input_a, output_mem_config, input_a ); -// Tensor in_b_eq_zero = ttnn::nez(input_b, output_mem_config, input_b ); -// in_b_eq_zero = ttnn::eqz(input_b, output_mem_config); -// Tensor result = ttnn::where(input_a, input_b,
in_b_eq_zero, output_mem_config, input_a); -// return result; -// } Tensor _scatter(const Tensor& input_a, const Tensor& input_b, const std::optional& output_mem_config) { tt::tt_metal::Array4D start_index = {0, 0, 0, 0};