#13758: Update logical op documentation
umadevimcw committed Oct 28, 2024
1 parent 5978dea commit 14cd5a5
Showing 2 changed files with 8 additions and 20 deletions.
14 changes: 8 additions & 6 deletions ttnn/cpp/ttnn/operations/eltwise/binary/binary_pybind.hpp
@@ -626,13 +626,13 @@ void bind_inplace_operation(py::module& module, const binary_operation_t& operat
}

template <typename binary_operation_t>
void bind_logical_inplace_operation(py::module& module, const binary_operation_t& operation, const std::string& description, const std::string& note=" ") {
void bind_logical_inplace_operation(py::module& module, const binary_operation_t& operation, const std::string& description, const std::string& math, const std::string& note=" ") {
auto doc = fmt::format(
R"doc(
{2}
.. math::
\mathrm{{output\_tensor}} = \verb|{0}|(\mathrm{{input\_tensor\_a,input\_tensor\_b}}).
{3}
Args:
input_tensor_a (ttnn.Tensor): the input tensor.
@@ -642,7 +642,7 @@ void bind_logical_inplace_operation(py::module& module, const binary_operation_t
List of ttnn.Tensor: the output tensor.
Note:
{3}
{4}
Example:
>>> tensor1 = ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16), device=device)
@@ -652,6 +652,7 @@ void bind_logical_inplace_operation(py::module& module, const binary_operation_t
operation.base_name(),
operation.python_fully_qualified_name(),
description,
math,
note);

bind_registered_operation(
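The hunk above is the core of the change: the math expression becomes its own format argument, which is why the note placeholder shifts from {3} to {4}. Below is a minimal sketch of the resulting docstring assembly, assuming {fmt}-style positional placeholders as in the template above; the helper name build_logical_inplace_doc and the trimmed-down template are hypothetical, not the actual ttnn binding code.

```cpp
// Minimal sketch, not the actual ttnn helper: shows how the new `math`
// argument slots into the doc template and pushes `note` from {3} to {4}.
#include <fmt/format.h>
#include <iostream>
#include <string>

std::string build_logical_inplace_doc(
    const std::string& base_name,
    const std::string& qualified_name,
    const std::string& description,
    const std::string& math,
    const std::string& note = " ") {
    return fmt::format(
        R"doc(
{2}

.. math::
    {3}

Note:
    {4}
)doc",
        base_name,       // {0}
        qualified_name,  // {1}
        description,     // {2}
        math,            // {3} -- the new argument introduced by this commit
        note);           // {4} -- previously occupied slot {3}
}

int main() {
    std::cout << build_logical_inplace_doc(
        "logical_or_",
        "ttnn.logical_or_",
        "Compute inplace logical OR of input_tensor_a and input_tensor_b.",
        R"((\mathrm{input\_tensor\_a}_i | \mathrm{input\_tensor\_b}_i))",
        "Supported dtypes, layouts, and ranks table goes here.");
    return 0;
}
```

Passing the formula separately lets each registration below (logical_or_, logical_xor_, logical_and_) supply its own `.. math::` body, as the calls later in this diff do.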
@@ -980,9 +981,7 @@ void py_module(py::module& module) {
module,
ttnn::logical_xor,
R"doc(Compute logical_xor :attr:`input_tensor_a` and :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`)doc",
R"doc(\mathrm{output\_tensor}_i = (\mathrm{input\_tensor\_a}_i \land \lnot \mathrm{input\_tensor\_b}_i) \lor (\lnot \mathrm{input\_tensor\_a}_i \land \mathrm{input\_tensor\_b}_i)
)doc",

R"doc(\mathrm{output\_tensor}_i = (\mathrm{input\_tensor\_a}_i \land \lnot \mathrm{input\_tensor\_b}_i) \lor (\lnot \mathrm{input\_tensor\_a}_i \land \mathrm{input\_tensor\_b}_i))doc",".",
R"doc(Supported dtypes, layouts, and ranks:
+----------------------------+---------------------------------+-------------------+
@@ -996,6 +995,7 @@ void py_module(py::module& module) {
module,
ttnn::logical_or_,
R"doc(Compute inplace logical OR of :attr:`input_tensor_a` and :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`)doc",
R"doc((\mathrm{{input\_tensor\_a}}_i | \mathrm{{input\_tensor\_b}}_i))doc",

R"doc(Supported dtypes, layouts, and ranks:
@@ -1010,6 +1010,7 @@ void py_module(py::module& module) {
module,
ttnn::logical_xor_,
R"doc(Compute inplace logical XOR of :attr:`input_tensor_a` and :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`)doc",
R"doc((\mathrm{input\_tensor\_a}_i \land \lnot \mathrm{input\_tensor\_b}_i) \lor (\lnot \mathrm{input\_tensor\_a}_i \land \mathrm{input\_tensor\_b}_i))doc",

R"doc(Supported dtypes, layouts, and ranks:
@@ -1024,6 +1025,7 @@ void py_module(py::module& module) {
module,
ttnn::logical_and_,
R"doc(Compute inplace logical AND of :attr:`input_tensor_a` and :attr:`input_tensor_b` and returns the tensor with the same layout as :attr:`input_tensor_a`)doc",
R"doc((\mathrm{{input\_tensor\_a}}_i \& \mathrm{{input\_tensor\_b}}_i))doc",

R"doc(Supported dtypes, layouts, and ranks:
14 changes: 0 additions & 14 deletions (second changed file)
@@ -151,13 +151,6 @@ Tensor _atan2(const Tensor& input_a, const Tensor& input_b, const std::optional<
return res;
}

// Tensor _logical_xor(const Tensor& input_a, const Tensor& input_b, const std::optional<MemoryConfig>& output_mem_config) {
// Tensor in_a_eq_zero = ttnn::eqz(input_a, output_mem_config);
// Tensor in_b_eq_zero = ttnn::eqz(input_b, output_mem_config);
// Tensor in_b_neq_zero = ttnn::nez(input_b, output_mem_config);
// Tensor result = ttnn::where(in_a_eq_zero, in_b_neq_zero, in_b_eq_zero);
// return result;
//}

Tensor ExecuteDiv::invoke(uint8_t queue_id, const Tensor& input, float value, bool accurate_mode, const std::string& round_mode, const std::optional<MemoryConfig>& output_mem_config, std::optional<Tensor> output_tensor) {
TT_FATAL((round_mode == "None" || round_mode == "trunc" || round_mode == "floor"), "Incorrect rounding mode (expected 'None', 'trunc', or 'floor')");
@@ -324,13 +317,6 @@ Tensor _floor_div(const Tensor& input_a, const Tensor& input_b, const std::optio
result);
}

// Tensor _logical_xor_(const Tensor& input_a, const Tensor& input_b, const std::optional<MemoryConfig>& output_mem_config) {
// Tensor in_a_eq_zero = ttnn::eqz(input_a, output_mem_config, input_a );
// Tensor in_b_eq_zero = ttnn::nez(input_b, output_mem_config, input_b );
// in_b_eq_zero = ttnn::eqz(input_b, output_mem_config);
// Tensor result = ttnn::where(input_a, input_b, in_b_eq_zero, output_mem_config, input_a);
// return result;
// }

Tensor _scatter(const Tensor& input_a, const Tensor& input_b, const std::optional<MemoryConfig>& output_mem_config) {
tt::tt_metal::Array4D start_index = {0, 0, 0, 0};
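For context on the formula the updated docstrings carry: the sketch below is illustrative scalar code, not the ttnn kernels, and checks that the documented expression (a_i ∧ ¬b_i) ∨ (¬a_i ∧ b_i) agrees with the where/eqz/nez selection used by the commented-out composite removed in this file.

```cpp
// Scalar sketch of the logical XOR semantics documented above; illustrative
// only, not the ttnn device implementation. Any nonzero value is treated as true.
#include <cassert>

// Formula added to the docstrings:
//   output_i = (a_i AND NOT b_i) OR (NOT a_i AND b_i)
float logical_xor_formula(float a, float b) {
    const bool ta = (a != 0.0f);
    const bool tb = (b != 0.0f);
    return ((ta && !tb) || (!ta && tb)) ? 1.0f : 0.0f;
}

// The selection performed by the removed commented-out composite,
// where(eqz(a), nez(b), eqz(b)): if a == 0 return (b != 0), else return (b == 0).
float logical_xor_where(float a, float b) {
    return (a == 0.0f) ? (b != 0.0f ? 1.0f : 0.0f)
                       : (b == 0.0f ? 1.0f : 0.0f);
}

int main() {
    const float vals[] = {0.0f, 1.0f, -3.5f};
    for (float a : vals) {
        for (float b : vals) {
            assert(logical_xor_formula(a, b) == logical_xor_where(a, b));
        }
    }
    return 0;
}
```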
