Commit 2cb9e25
builds again
mbezuljTT committed Nov 1, 2024
1 parent d9abee9 commit 2cb9e25
Showing 5 changed files with 22 additions and 13 deletions.
runtime/lib/common/system_desc.cpp (2 changes: 1 addition & 1 deletion)
@@ -55,7 +55,7 @@ static ::tt::target::Arch toFlatbuffer(::tt::ARCH arch) {
 }
 
 static std::vector<::tt::target::ChipChannel>
-getAllDeviceConnections(const vector<::tt::tt_metal::Device *> &devices) {
+getAllDeviceConnections(const std::vector<::tt::tt_metal::Device *> &devices) {
   std::set<std::tuple<chip_id_t, CoreCoord, chip_id_t, CoreCoord>>
       connectionSet;
 
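The only change in this file is fully qualifying `vector` as `std::vector`. A minimal, self-contained sketch of why the unqualified name stops compiling once no using-declaration brings it into scope (the helper name below is hypothetical, not part of the file):

#include <vector>

// Compiles: the type is fully qualified, so it does not depend on any
// using-declaration being in scope.
static std::vector<int> chipIds() { return {0, 1}; }

// Would not compile without `using std::vector;` or `using namespace std;`:
// static vector<int> chipIdsUnqualified() { return {0, 1}; }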
runtime/lib/ttnn/operations/data_movement/slice.cpp (7 changes: 4 additions & 3 deletions)
@@ -14,9 +14,10 @@ void run(const ::tt::target::ttnn::SliceOp *op, ProgramContext &context) {
   ProgramTensorPool &tensorPool = context.getTensorPool();
   const ::ttnn::Tensor &in = tensorPool.at(op->in()->global_id());
   DEBUG_ASSERT(in.is_allocated());
-  std::vector<int32_t> begins(op->begins()->begin(), op->begins()->end());
-  std::vector<int32_t> ends(op->ends()->begin(), op->ends()->end());
-  std::vector<int32_t> step(op->step()->begin(), op->step()->end());
+  ::ttnn::SmallVector<int32_t> begins(op->begins()->begin(),
+                                      op->begins()->end());
+  ::ttnn::SmallVector<int32_t> ends(op->ends()->begin(), op->ends()->end());
+  ::ttnn::SmallVector<int32_t> step(op->step()->begin(), op->step()->end());
 
   ::ttnn::Tensor out = ::ttnn::slice(in, begins, ends, step);
   tensorPool.insert_or_assign(op->out()->global_id(), out);
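The slice arguments move from `std::vector<int32_t>` to `::ttnn::SmallVector<int32_t>`, matching what the updated `::ttnn::slice` signature appears to expect. A minimal sketch of the same construction pattern, assuming only that `::ttnn::SmallVector` is iterator-constructible (as the new code itself relies on), that the file's existing includes make it visible, and that `op->begins()` is a flatbuffers-style range with `begin()`/`end()`; the helper is hypothetical, not part of the commit:

// Copy any iterable range (e.g. a flatbuffers vector field) into the
// SmallVector type the op API expects.
template <typename Range>
::ttnn::SmallVector<int32_t> toSmallVector(const Range &range) {
  return ::ttnn::SmallVector<int32_t>(range.begin(), range.end());
}

// Usage mirroring the updated code:
//   auto begins = toSmallVector(*op->begins());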
runtime/lib/ttnn/operations/pool/maxpool2d.cpp (4 changes: 2 additions & 2 deletions)
@@ -32,8 +32,8 @@ preshardForMaxPool2d(const ::tt::target::ttnn::MaxPool2dOp *op,
   auto parallel_config =
       ::ttnn::operations::conv::conv2d::determine_parallel_config(
           ::ttnn::TensorMemoryLayout::HEIGHT_SHARDED, op->batch_size(),
-          op->channels(), output_height, output_width, op->channels(), &device,
-          ShardOrientation::ROW_MAJOR);
+          op->channels(), output_height, output_width, op->channels(),
+          device.compute_with_storage_grid_size(), ShardOrientation::ROW_MAJOR);
   auto sharded_memory_config = ::ttnn::operations::conv::conv2d::
       create_sharded_memory_config_from_parallel_config(inputShape,
                                                         parallel_config, 1);
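Here `determine_parallel_config` no longer receives a `Device *`; the caller instead passes the compute grid extent obtained from `device.compute_with_storage_grid_size()`. A sketch of the call-site shape under that assumption, using the same names as the hunk (only the local variables are hypothetical):

// Resolve the grid once, then hand it to the config helper instead of the device.
auto computeGrid = device.compute_with_storage_grid_size();
auto parallelConfig =
    ::ttnn::operations::conv::conv2d::determine_parallel_config(
        ::ttnn::TensorMemoryLayout::HEIGHT_SHARDED, op->batch_size(),
        op->channels(), output_height, output_width, op->channels(),
        computeGrid, ShardOrientation::ROW_MAJOR);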
runtime/lib/ttnn/operations/reduction/reduction.cpp (10 changes: 5 additions & 5 deletions)
@@ -12,8 +12,8 @@ static void runReductionOp(
     ::tt::target::ttnn::ReductionOp const *op, ProgramTensorPool &tensorPool,
     std::function<::ttnn::Tensor(
         const ::ttnn::Tensor &,
-        const std::optional<std::variant<int, std::vector<int>>> &, const bool,
-        const std::optional<::tt::tt_metal::MemoryConfig> &,
+        const std::optional<std::variant<int, ::ttnn::SmallVector<int>>> &,
+        const bool, const std::optional<::tt::tt_metal::MemoryConfig> &,
         const std::optional<::ttnn::DeviceComputeKernelConfig> &, float)>
         ttnnOp) {
   ::tt::tt_metal::MemoryConfig outputMemoryConfig =
@@ -22,9 +22,9 @@ static void runReductionOp(
   DEBUG_ASSERT(in.is_allocated());
 
   const auto *fbDimArg = op->dim_arg();
-  std::optional<vector<int>> dimArg =
-      fbDimArg ? std::make_optional(
-                     std::vector<int>(fbDimArg->begin(), fbDimArg->end()))
+  std::optional<::ttnn::SmallVector<int>> dimArg =
+      fbDimArg ? std::make_optional(::ttnn::SmallVector<int>(fbDimArg->begin(),
+                                                             fbDimArg->end()))
                : std::nullopt;
 
   ::ttnn::Tensor out = ttnnOp(
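Both the `std::function` signature and the local `dimArg` switch from `std::vector<int>` to `::ttnn::SmallVector<int>`, presumably tracking the updated reduction op signatures in ttnn. A standalone sketch of the optional-dimension pattern, with the flatbuffer field type left generic and the helper name hypothetical, assuming `::ttnn::SmallVector` is visible through the file's existing includes:

#include <optional>

// Build the optional dimension list only when the flatbuffer field is
// present; otherwise pass std::nullopt through to the reduction op.
template <typename FbVector>
std::optional<::ttnn::SmallVector<int>> toOptionalDims(const FbVector *fbDims) {
  return fbDims ? std::make_optional(
                      ::ttnn::SmallVector<int>(fbDims->begin(), fbDims->end()))
                : std::nullopt;
}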
runtime/lib/ttnn/runtime.cpp (12 changes: 10 additions & 2 deletions)
@@ -9,6 +9,9 @@
 #include "tt/runtime/utils.h"
 #include "ttmlir/Target/TTNN/Target.h"
 #include "ttmlir/Version.h"
+#include "ttnn/tensor/shape/small_vector.hpp"
+#include "ttnn/tensor/types.hpp"
 
 namespace tt::runtime::ttnn {
 
 using ::tt::runtime::DeviceRuntime;
@@ -45,9 +45,14 @@ Tensor createTensor(std::shared_ptr<void> data,
                     std::vector<std::uint32_t> const &stride,
                     std::uint32_t itemsize, ::tt::target::DataType dataType) {
   std::uint32_t numElements = shape[0] * stride[0];
+
+  ::tt::tt_metal::SmallVector<uint32_t> small_vector_shape(shape.begin(),
+                                                           shape.end());
+
   auto tensor = std::make_shared<::ttnn::Tensor>(
-      createStorage(data.get(), numElements, dataType), shape,
-      utils::toTTNNDataType(dataType), ::ttnn::Layout::ROW_MAJOR);
+      createStorage(data.get(), numElements, dataType),
+      ::ttnn::Shape(small_vector_shape), utils::toTTNNDataType(dataType),
+      ::ttnn::Layout::ROW_MAJOR);
   return Tensor(tensor, data, DeviceRuntime::TTNN);
 }
 
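`createTensor` now wraps the shape in a `::ttnn::Shape` built from a `SmallVector` instead of handing the `std::vector<std::uint32_t>` straight to the `::ttnn::Tensor` constructor. A minimal sketch of just that conversion step, assuming the headers added above (and whichever existing include provides `::ttnn::Shape`); the helper name is hypothetical:

#include <cstdint>
#include <vector>
#include "ttnn/tensor/shape/small_vector.hpp"  // header added by this commit

::ttnn::Shape toTTNNShape(const std::vector<std::uint32_t> &shape) {
  // Copy the runtime shape into the SmallVector form, then wrap it in the
  // ::ttnn::Shape type that the tensor constructor now expects.
  ::tt::tt_metal::SmallVector<std::uint32_t> smallVectorShape(shape.begin(),
                                                              shape.end());
  return ::ttnn::Shape(smallVectorShape);
}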
