From cc80fec1ca9818eca42545a80f67587ed6d17780 Mon Sep 17 00:00:00 2001
From: vprajapati-tt
Date: Wed, 5 Mar 2025 18:57:47 +0000
Subject: [PATCH 1/4] Switched to Nanobind + Good Build

---
 include/ttmlir/Bindings/Python/TTMLIRModule.h |  37 +--
 .../Dialect/TTNN/Utils/OptimizerOverrides.h   |  11 +-
 lib/Dialect/TTNN/Utils/OptimizerOverrides.cpp |   8 +-
 python/CMakeLists.txt                         |   1 +
 python/OptimizerOverrides.cpp                 |  69 ++---
 python/Overrides.cpp                          |   4 +-
 python/Passes.cpp                             | 107 +++-----
 python/TTIRModule.cpp                         |   2 +-
 python/TTKernelModule.cpp                     |  14 +-
 python/TTMLIRModule.cpp                       |   4 +-
 python/TTModule.cpp                           | 253 +++++++++---------
 python/TTNNModule.cpp                         |  79 +++---
 python/Util.cpp                               |   9 +-
 13 files changed, 281 insertions(+), 317 deletions(-)

diff --git a/include/ttmlir/Bindings/Python/TTMLIRModule.h b/include/ttmlir/Bindings/Python/TTMLIRModule.h
index 49ec0e912b..a01ae0dc84 100644
--- a/include/ttmlir/Bindings/Python/TTMLIRModule.h
+++ b/include/ttmlir/Bindings/Python/TTMLIRModule.h
@@ -6,7 +6,8 @@
 #define TTMLIR_BINDINGS_PYTHON_TTMLIRMODULE_H
 
 #include "mlir-c/Bindings/Python/Interop.h"
-#include "mlir/Bindings/Python/PybindAdaptors.h"
+#include "mlir/Bindings/Python/Nanobind.h"
+#include "mlir/Bindings/Python/NanobindAdaptors.h"
 #include "mlir/CAPI/IR.h"
 #include "mlir/InitAllDialects.h"
 #include "mlir/InitAllPasses.h"
@@ -23,46 +24,46 @@
 
 #include <vector>
 
-namespace py = pybind11;
+namespace nb = nanobind;
 
 namespace mlir::ttmlir::python {
 
 template <typename T>
-py::class_<T> tt_attribute_class(py::module &m, const char *class_name) {
-  py::class_<T> cls(m, class_name);
+nb::class_<T> tt_attribute_class(nb::module_ &m, const char *class_name) {
+  nb::class_<T> cls(m, class_name);
   cls.def_static("maybe_downcast",
-                 [](MlirAttribute attr) -> std::variant<T, py::object> {
+                 [](MlirAttribute attr) -> std::variant<T, nb::object> {
                    auto res = mlir::dyn_cast<T>(unwrap(attr));
                    if (res) {
                      return res;
                    }
-                   return py::none();
+                   return nb::none();
                  });
   return cls;
 }
 
 template <typename T>
-py::class_<T> tt_type_class(py::module &m, const char *class_name) {
-  py::class_<T> cls(m, class_name);
+nb::class_<T> tt_type_class(nb::module_ &m, const char *class_name) {
+  nb::class_<T> cls(m, class_name);
   cls.def_static("maybe_downcast",
-                 [](MlirType type) -> std::variant<T, py::object> {
+                 [](MlirType type) -> std::variant<T, nb::object> {
                    auto res = mlir::dyn_cast<T>(unwrap(type));
                    if (res) {
                      return res;
                    }
-                   return py::none();
+                   return nb::none();
                  });
   return cls;
 }
 
-void populateTTModule(py::module &m);
-void populateTTIRModule(py::module &m);
-void populateTTKernelModule(py::module &m);
-void populateTTNNModule(py::module &m);
-void populateOverridesModule(py::module &m);
-void populateOptimizerOverridesModule(py::module &m);
-void populatePassesModule(py::module &m);
-void populateUtilModule(py::module &m);
+void populateTTModule(nb::module_ &m);
+void populateTTIRModule(nb::module_ &m);
+void populateTTKernelModule(nb::module_ &m);
+void populateTTNNModule(nb::module_ &m);
+void populateOverridesModule(nb::module_ &m);
+void populateOptimizerOverridesModule(nb::module_ &m);
+void populatePassesModule(nb::module_ &m);
+void populateUtilModule(nb::module_ &m);
 } // namespace mlir::ttmlir::python
 
 #endif // TTMLIR_BINDINGS_PYTHON_TTMLIRMODULE_H
diff --git a/include/ttmlir/Dialect/TTNN/Utils/OptimizerOverrides.h b/include/ttmlir/Dialect/TTNN/Utils/OptimizerOverrides.h
index ba90bc2ec7..cbe1f3ebe7 100644
--- a/include/ttmlir/Dialect/TTNN/Utils/OptimizerOverrides.h
+++ b/include/ttmlir/Dialect/TTNN/Utils/OptimizerOverrides.h
@@ -70,14 +70,15 @@ class OptimizerOverridesHandler {
   // Wrapper methods we use to expose the adders to the python bindings
std::unordered_map - getInputLayoutOverridesPybindWrapper() const; + getInputLayoutOverridesNanobindWrapper() const; std::unordered_map - getOutputLayoutOverridesPybindWrapper() const; + getOutputLayoutOverridesNanobindWrapper() const; // Wrapper methods we use to expose the adders to the python bindings - void addInputLayoutOverridePybindWrapper(std::string, std::vector &); - void addOutputLayoutOverridePybindWrapper(std::string, - OutputLayoutOverrideParams); + void addInputLayoutOverrideNanobindWrapper(std::string, + std::vector &); + void addOutputLayoutOverrideNanobindWrapper(std::string, + OutputLayoutOverrideParams); private: // Flags for enabling/disabling the optimizer passes diff --git a/lib/Dialect/TTNN/Utils/OptimizerOverrides.cpp b/lib/Dialect/TTNN/Utils/OptimizerOverrides.cpp index d7f0b52fd6..2a78ac27b4 100644 --- a/lib/Dialect/TTNN/Utils/OptimizerOverrides.cpp +++ b/lib/Dialect/TTNN/Utils/OptimizerOverrides.cpp @@ -83,7 +83,7 @@ OptimizerOverridesHandler::getOutputLayoutOverrides() const { } std::unordered_map -OptimizerOverridesHandler::getInputLayoutOverridesPybindWrapper() const { +OptimizerOverridesHandler::getInputLayoutOverridesNanobindWrapper() const { std::unordered_map inputLayoutOverridesWrapper; for (auto &entry : inputLayoutOverrides) { @@ -93,7 +93,7 @@ OptimizerOverridesHandler::getInputLayoutOverridesPybindWrapper() const { } std::unordered_map -OptimizerOverridesHandler::getOutputLayoutOverridesPybindWrapper() const { +OptimizerOverridesHandler::getOutputLayoutOverridesNanobindWrapper() const { std::unordered_map outputLayoutOverridesWrapper; for (auto &entry : outputLayoutOverrides) { @@ -190,7 +190,7 @@ void OptimizerOverridesHandler::addOutputLayoutOverride( std::move(grid), bufferType, tensorMemoryLayout, memoryLayout, dataType}; } -void OptimizerOverridesHandler::addInputLayoutOverridePybindWrapper( +void OptimizerOverridesHandler::addInputLayoutOverrideNanobindWrapper( std::string opName, std::vector &operandIdxes) { StringRef opNameStringRef(opName); SmallVector operandIdxesSmallVector(operandIdxes.begin(), @@ -198,7 +198,7 @@ void OptimizerOverridesHandler::addInputLayoutOverridePybindWrapper( addInputLayoutOverride(opNameStringRef, operandIdxesSmallVector); } -void OptimizerOverridesHandler::addOutputLayoutOverridePybindWrapper( +void OptimizerOverridesHandler::addOutputLayoutOverrideNanobindWrapper( std::string opName, OutputLayoutOverrideParams overrideParams) { StringRef opNameStringRef(opName); addOutputLayoutOverride(opNameStringRef, overrideParams); diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 604a192def..cfe8c21675 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -117,6 +117,7 @@ declare_mlir_python_extension(TTMLIRPythonExtensions.Main MLIRTestToLLVMIRTranslation MLIRVCIXToLLVMIRTranslation MLIRX86VectorToLLVMIRTranslation + PYTHON_BINDINGS_LIBRARY nanobind ) set(TTMLIR_PYTHON_SOURCES diff --git a/python/OptimizerOverrides.cpp b/python/OptimizerOverrides.cpp index 64e7a98499..f3ca0f3f20 100644 --- a/python/OptimizerOverrides.cpp +++ b/python/OptimizerOverrides.cpp @@ -8,11 +8,11 @@ namespace mlir::ttmlir::python { -void populateOptimizerOverridesModule(py::module &m) { +void populateOptimizerOverridesModule(nb::module_ &m) { - py::class_(m, + nb::class_(m, "OptimizerOverridesHandler") - .def(py::init<>()) + .def(nb::init<>()) .def("set_enable_optimizer", &tt::ttnn::OptimizerOverridesHandler::setEnableOptimizer) @@ -56,20 +56,21 @@ void populateOptimizerOverridesModule(py::module &m) { 
.def("get_input_layout_overrides", &tt::ttnn::OptimizerOverridesHandler:: - getInputLayoutOverridesPybindWrapper) + getInputLayoutOverridesNanobindWrapper) .def("get_output_layout_overrides", &tt::ttnn::OptimizerOverridesHandler:: - getOutputLayoutOverridesPybindWrapper) + getOutputLayoutOverridesNanobindWrapper) - .def("add_input_layout_override", &tt::ttnn::OptimizerOverridesHandler:: - addInputLayoutOverridePybindWrapper) + .def("add_input_layout_override", + &tt::ttnn::OptimizerOverridesHandler:: + addInputLayoutOverrideNanobindWrapper) .def("add_output_layout_override", &tt::ttnn::OptimizerOverridesHandler:: - addOutputLayoutOverridePybindWrapper) + addOutputLayoutOverrideNanobindWrapper) .def("to_string", &tt::ttnn::OptimizerOverridesHandler::toString); - py::enum_( + nb::enum_( m, "MemoryLayoutAnalysisPolicyType") .value("DFSharding", mlir::tt::MemoryLayoutAnalysisPolicyType::DFSharding) .value("GreedyL1Interleaved", @@ -77,26 +78,26 @@ void populateOptimizerOverridesModule(py::module &m) { .value("BFInterleaved", mlir::tt::MemoryLayoutAnalysisPolicyType::BFInterleaved); - py::enum_(m, "BufferType") + nb::enum_(m, "BufferType") .value("DRAM", mlir::tt::ttnn::BufferType::DRAM) .value("L1", mlir::tt::ttnn::BufferType::L1) .value("SystemMemory", mlir::tt::ttnn::BufferType::SystemMemory) .value("L1Small", mlir::tt::ttnn::BufferType::L1Small) .value("Trace", mlir::tt::ttnn::BufferType::Trace); - py::enum_(m, "Layout") + nb::enum_(m, "Layout") .value("RowMajor", mlir::tt::ttnn::Layout::RowMajor) .value("Tile", mlir::tt::ttnn::Layout::Tile) .value("Invalid", mlir::tt::ttnn::Layout::Invalid); - py::enum_(m, "TensorMemoryLayout") + nb::enum_(m, "TensorMemoryLayout") .value("Interleaved", mlir::tt::ttnn::TensorMemoryLayout::Interleaved) .value("SingleBank", mlir::tt::ttnn::TensorMemoryLayout::SingleBank) .value("HeightSharded", mlir::tt::ttnn::TensorMemoryLayout::HeightSharded) .value("WidthSharded", mlir::tt::ttnn::TensorMemoryLayout::WidthSharded) .value("BlockSharded", mlir::tt::ttnn::TensorMemoryLayout::BlockSharded); - py::enum_(m, "DataType") + nb::enum_(m, "DataType") .value("Float32", mlir::tt::DataType::Float32) .value("Float16", mlir::tt::DataType::Float16) .value("BFloat16", mlir::tt::DataType::BFloat16) @@ -111,10 +112,10 @@ void populateOptimizerOverridesModule(py::module &m) { .value("UInt8", mlir::tt::DataType::UInt8) .value("Int32", mlir::tt::DataType::Int32); - py::class_( + nb::class_( m, "InputLayoutOverrideParams") - .def(py::init<>()) - .def_property( + .def(nb::init<>()) + .def_prop_rw( "operand_idxes", [](const mlir::tt::ttnn::InputLayoutOverrideParams &obj) { // Getter: Convert SmallVector to std::vector @@ -128,10 +129,10 @@ void populateOptimizerOverridesModule(py::module &m) { obj.operandIdxes.append(input.begin(), input.end()); }); - py::class_( + nb::class_( m, "OutputLayoutOverrideParams") - .def(py::init<>()) - .def_property( + .def(nb::init<>()) + .def_prop_rw( "grid", [](const mlir::tt::ttnn::OutputLayoutOverrideParams &obj) { // Getter: Convert SmallVector to std::vector @@ -151,20 +152,20 @@ void populateOptimizerOverridesModule(py::module &m) { } obj.grid->append(input.begin(), input.end()); }) - .def_readwrite("buffer_type", - &mlir::tt::ttnn::OutputLayoutOverrideParams::bufferType) - .def_readwrite( - "tensor_memory_layout", - &mlir::tt::ttnn::OutputLayoutOverrideParams::tensorMemoryLayout) - .def_readwrite("memory_layout", - &mlir::tt::ttnn::OutputLayoutOverrideParams::memoryLayout) - .def_readwrite("data_type", - 
&mlir::tt::ttnn::OutputLayoutOverrideParams::dataType) + .def_rw("buffer_type", + &mlir::tt::ttnn::OutputLayoutOverrideParams::bufferType) + .def_rw("tensor_memory_layout", + &mlir::tt::ttnn::OutputLayoutOverrideParams::tensorMemoryLayout) + .def_rw("memory_layout", + &mlir::tt::ttnn::OutputLayoutOverrideParams::memoryLayout) + .def_rw("data_type", + &mlir::tt::ttnn::OutputLayoutOverrideParams::dataType) .def("set_buffer_type_from_str", [](mlir::tt::ttnn::OutputLayoutOverrideParams &obj, const std::string &value) { - if (auto bufferType = mlir::tt::ttnn::symbolizeBufferType(value)) { - obj.bufferType = bufferType; + if (auto bufferType_ = + mlir::tt::ttnn::symbolizeBufferType(value)) { + obj.bufferType = bufferType_; } else { throw std::invalid_argument("Invalid buffer type: " + value); } @@ -183,8 +184,8 @@ void populateOptimizerOverridesModule(py::module &m) { .def("set_memory_layout_from_str", [](mlir::tt::ttnn::OutputLayoutOverrideParams &obj, const std::string &value) { - if (auto memoryLayout = mlir::tt::ttnn::symbolizeLayout(value)) { - obj.memoryLayout = memoryLayout; + if (auto memoryLayout_ = mlir::tt::ttnn::symbolizeLayout(value)) { + obj.memoryLayout = memoryLayout_; } else { throw std::invalid_argument("Invalid memory layout: " + value); } @@ -192,8 +193,8 @@ void populateOptimizerOverridesModule(py::module &m) { .def("set_data_type_from_str", [](mlir::tt::ttnn::OutputLayoutOverrideParams &obj, const std::string &value) { - if (auto dataType = mlir::tt::DataTypeStringToEnum(value)) { - obj.dataType = dataType; + if (auto dataType_ = mlir::tt::DataTypeStringToEnum(value)) { + obj.dataType = dataType_; } else { throw std::invalid_argument("Invalid data type: " + value); } diff --git a/python/Overrides.cpp b/python/Overrides.cpp index b4aa1623fe..5fd7fb2700 100644 --- a/python/Overrides.cpp +++ b/python/Overrides.cpp @@ -6,11 +6,11 @@ namespace mlir::ttmlir::python { -void populateOverridesModule(py::module &m) { +void populateOverridesModule(nb::module_ &m) { m.def( "get_ptr", [](void *op) { return reinterpret_cast(op); }, - py::arg("op").noconvert()); + nb::arg("op").noconvert()); } } // namespace mlir::ttmlir::python diff --git a/python/Passes.cpp b/python/Passes.cpp index dcc252e276..a536494940 100644 --- a/python/Passes.cpp +++ b/python/Passes.cpp @@ -10,11 +10,11 @@ #include "ttmlir/Target/TTMetal/TTMetalToFlatbuffer.h" #include "ttmlir/Target/TTNN/TTNNToFlatbuffer.h" #include -#include +#include +#include // Make Opaque so Casts & Copies don't occur -PYBIND11_MAKE_OPAQUE(std::shared_ptr); -PYBIND11_MAKE_OPAQUE(std::vector>); +NB_MAKE_OPAQUE(std::vector>); namespace mlir::tt::ttnn { void registerTTNNToFlatbuffer(); @@ -22,7 +22,7 @@ void registerTTNNToFlatbuffer(); namespace mlir::ttmlir::python { -void populatePassesModule(py::module &m) { +void populatePassesModule(nb::module_ &m) { // When populating passes, need to first register them mlir::tt::registerAllPasses(); @@ -40,7 +40,7 @@ void populatePassesModule(py::module &m) { throw std::runtime_error("Failed to run pass manager"); } }, - py::arg("module"), py::arg("options") = ""); + nb::arg("module"), nb::arg("options") = ""); m.def( "ttnn_pipeline_analysis_passes", @@ -54,7 +54,7 @@ void populatePassesModule(py::module &m) { throw std::runtime_error("Failed to run pass manager"); } }, - py::arg("module"), py::arg("options") = ""); + nb::arg("module"), nb::arg("options") = ""); m.def( "ttnn_pipeline_lowering_passes", @@ -68,7 +68,7 @@ void populatePassesModule(py::module &m) { throw std::runtime_error("Failed to run 
pass manager"); } }, - py::arg("module"), py::arg("options") = ""); + nb::arg("module"), nb::arg("options") = ""); m.def( "ttnn_pipeline_layout_decomposition_pass", @@ -83,7 +83,7 @@ void populatePassesModule(py::module &m) { throw std::runtime_error("Failed to run pass manager"); } }, - py::arg("module"), py::arg("options") = ""); + nb::arg("module"), nb::arg("options") = ""); m.def( "ttnn_pipeline_dealloc_pass", @@ -97,7 +97,7 @@ void populatePassesModule(py::module &m) { throw std::runtime_error("Failed to run pass manager"); } }, - py::arg("module"), py::arg("options") = ""); + nb::arg("module"), nb::arg("options") = ""); m.def( "ttir_to_ttnn_backend_pipeline", @@ -125,7 +125,7 @@ void populatePassesModule(py::module &m) { throw std::runtime_error("Failed to run pass manager"); } }, - py::arg("module"), py::arg("options") = ""); + nb::arg("module"), nb::arg("options") = ""); m.def( "ttir_to_ttmetal_backend_pipeline", @@ -148,30 +148,11 @@ void populatePassesModule(py::module &m) { throw std::runtime_error("Failed to run pass manager"); } }, - py::arg("module"), py::arg("options") = ""); - - py::class_>(m, "SharedVoidPtr") - .def(py::init<>()) - .def("from_ttnn", [](std::shared_ptr data, MlirModule module) { - mlir::Operation *moduleOp = unwrap(mlirModuleGetOperation(module)); - data = mlir::tt::ttnn::ttnnToFlatbuffer(moduleOp); - }); - - m.def("ttnn_to_flatbuffer_binary", [](MlirModule module) { - // NOLINTBEGIN - mlir::Operation *moduleOp = unwrap(mlirModuleGetOperation(module)); - std::shared_ptr *binary = new std::shared_ptr(); - *binary = mlir::tt::ttnn::ttnnToFlatbuffer(moduleOp); - return py::capsule((void *)binary, [](void *data) { - std::shared_ptr *bin = static_cast *>(data); - delete bin; - }); - // NOLINTEND - }); + nb::arg("module"), nb::arg("options") = ""); // This binds the vector into an interfaceable object in python and also an // opaquely passed one into other functions. - py::bind_vector>>( + nb::bind_vector>>( m, "ModuleLog"); m.def( @@ -197,8 +178,8 @@ void populatePassesModule(py::module &m) { filepath); } }, - py::arg("module"), py::arg("filepath"), py::arg("goldenMap") = py::dict(), - py::arg("moduleCache") = + nb::arg("module"), nb::arg("filepath"), nb::arg("goldenMap") = nb::dict(), + nb::arg("moduleCache") = std::vector>()); m.def("ttmetal_to_flatbuffer_file", @@ -234,9 +215,9 @@ void populatePassesModule(py::module &m) { output_stream.flush(); return output; }, - py::arg("module"), py::arg("isTensixKernel")); + nb::arg("module"), nb::arg("isTensixKernel")); - py::enum_<::tt::target::DataType>(m, "DataType") + nb::enum_<::tt::target::DataType>(m, "DataType") .value("Float32", ::tt::target::DataType::Float32) .value("Float16", ::tt::target::DataType::Float16); @@ -255,39 +236,37 @@ void populatePassesModule(py::module &m) { return ::tt::target::DataType::MIN; }); - // Preserve the Data by holding it in a SharedPtr. 
- py::class_>( - m, "GoldenTensor") - .def(py::init([](std::string name, std::vector shape, - std::vector strides, - ::tt::target::DataType dtype, std::uintptr_t ptr, - std::size_t dataSize) { - // Create Golden Tensor and move ownership to GoldenTensor - auto *dataPtr = reinterpret_cast(ptr); - - return std::make_shared( - name, shape, strides, dtype, - std::vector(dataPtr, dataPtr + dataSize)); - })) - .def_readwrite("name", &mlir::tt::GoldenTensor::name) - .def_readwrite("shape", &mlir::tt::GoldenTensor::shape) - .def_readwrite("strides", &mlir::tt::GoldenTensor::strides) - .def_readwrite("dtype", &mlir::tt::GoldenTensor::dtype) - .def_readwrite("data", &mlir::tt::GoldenTensor::data); - - py::class_>(m, "MLIRModuleLogger") - .def(py::init<>()) + nb::class_(m, "GoldenTensor") + .def("__init__", + [](mlir::tt::GoldenTensor *self, std::string name, + std::vector shape, std::vector strides, + ::tt::target::DataType dtype, std::uintptr_t ptr, + std::size_t dataSize) { + new (self) mlir::tt::GoldenTensor( + name, shape, strides, dtype, + std::vector( + reinterpret_cast(ptr), + reinterpret_cast(ptr) + dataSize)); + }) + .def_rw("name", &mlir::tt::GoldenTensor::name) + .def_rw("shape", &mlir::tt::GoldenTensor::shape) + .def_rw("strides", &mlir::tt::GoldenTensor::strides) + .def_rw("dtype", &mlir::tt::GoldenTensor::dtype) + .def_rw("data", &mlir::tt::GoldenTensor::data); + + // Supposedly no need for shared_ptr holder types anymore, have python take + // ownership of ModuleLog + nb::class_(m, "MLIRModuleLogger") + .def(nb::init<>(), nb::rv_policy::take_ownership) .def( "attach_context", - [](std::shared_ptr &self, MlirContext ctx, + [](mlir::tt::MLIRModuleLogger *self, MlirContext ctx, std::vector &passnames_to_cache) { self->attachContext(unwrap(ctx), passnames_to_cache); }, - py::arg("ctx"), py::arg("passnames_to_cache") = py::list()) - .def_property_readonly( - "module_log", [](std::shared_ptr &self) { - return self->moduleCache; - }); + nb::arg("ctx"), nb::arg("passnames_to_cache") = nb::list()) + .def_prop_ro("module_log", [](mlir::tt::MLIRModuleLogger *self) { + return self->moduleCache; + }); } } // namespace mlir::ttmlir::python diff --git a/python/TTIRModule.cpp b/python/TTIRModule.cpp index 3dbb29f86f..2812ce7ba0 100644 --- a/python/TTIRModule.cpp +++ b/python/TTIRModule.cpp @@ -7,7 +7,7 @@ #include "mlir/CAPI/IR.h" namespace mlir::ttmlir::python { -void populateTTIRModule(py::module &m) { +void populateTTIRModule(nb::module_ &m) { m.def("is_dps", [](MlirOperation op) { return mlir::isa(unwrap(op)); }); diff --git a/python/TTKernelModule.cpp b/python/TTKernelModule.cpp index f6e43215d4..def8088402 100644 --- a/python/TTKernelModule.cpp +++ b/python/TTKernelModule.cpp @@ -12,8 +12,8 @@ #include "ttmlir/Dialect/TTKernel/IR/TTKernelOpsTypes.h" namespace mlir::ttmlir::python { -void populateTTKernelModule(py::module &m) { - py::class_(m, "CBType") +void populateTTKernelModule(nb::module_ &m) { + tt_type_class(m, "CBType") .def_static("get", [](MlirContext ctx, uint64_t address, uint64_t port, MlirType memrefType) { @@ -24,10 +24,10 @@ void populateTTKernelModule(py::module &m) { [](MlirType &ty) { return mlir::cast(unwrap(ty)); }) - .def_property_readonly("shape", - [](tt::ttkernel::CBType &cb) { - return std::vector(cb.getShape()); - }) - .def_property_readonly("memref", &tt::ttkernel::CBType::getMemref); + .def_prop_ro("shape", + [](tt::ttkernel::CBType &cb) { + return std::vector(cb.getShape()); + }) + .def_prop_ro("memref", &tt::ttkernel::CBType::getMemref); } } // namespace 
mlir::ttmlir::python diff --git a/python/TTMLIRModule.cpp b/python/TTMLIRModule.cpp index 2805399205..82734adabe 100644 --- a/python/TTMLIRModule.cpp +++ b/python/TTMLIRModule.cpp @@ -4,7 +4,7 @@ #include "ttmlir/Bindings/Python/TTMLIRModule.h" -PYBIND11_MODULE(_ttmlir, m) { +NB_MODULE(_ttmlir, m) { m.doc() = "ttmlir main python extension"; m.def( @@ -25,7 +25,7 @@ PYBIND11_MODULE(_ttmlir, m) { mlirDialectHandleLoadDialect(ttnn_handle, context); } }, - py::arg("context"), py::arg("load") = true); + nb::arg("context"), nb::arg("load") = true); auto tt_ir = m.def_submodule("tt_ir", "TT IR Bindings"); mlir::ttmlir::python::populateTTModule(tt_ir); diff --git a/python/TTModule.cpp b/python/TTModule.cpp index 31b6e1c084..4ff9bda0c8 100644 --- a/python/TTModule.cpp +++ b/python/TTModule.cpp @@ -15,7 +15,7 @@ #include "ttmlir/Utils.h" namespace mlir::ttmlir::python { -void populateTTModule(py::module &m) { +void populateTTModule(nb::module_ &m) { tt_attribute_class(m, "MetalLayoutAttr") .def_static("get", [](MlirContext ctx, MlirType rankedTensorType, @@ -65,16 +65,16 @@ void populateTTModule(py::module &m) { .withElementType(unwrap(ctx), unwrap(elementType)); }) .def("getLayout", - [](MlirType &type) -> std::variant { + [](MlirType &type) -> std::variant { // Make sure that this is operating on a RankedTensorType object if (not isa(unwrap(type))) { - return py::none(); + return nb::none(); } RankedTensorType tensor = mlir::cast(unwrap(type)); // Make sure that this Tensor has an encoding value if (not tensor.getEncoding()) { - return py::none(); + return nb::none(); } tt::MetalLayoutAttr layout = mlir::cast(tensor.getEncoding()); @@ -82,31 +82,29 @@ void populateTTModule(py::module &m) { }) .def("wrapped", [](tt::MetalLayoutAttr const &self) { return wrap(self); }) - .def_property_readonly("stride", - [](tt::MetalLayoutAttr const &self, - std::vector logicalShape) { - auto stride = self.getStride(logicalShape); - return std::vector(stride.begin(), - stride.end()); - }) - .def_property_readonly("oobval", &tt::MetalLayoutAttr::getOobVal) - .def_property_readonly("oobval_as_int", - [](tt::MetalLayoutAttr la) { - return static_cast(la.getOobVal()); - }) - .def_property_readonly("grid_attr", &tt::MetalLayoutAttr::getGrid) - .def_property_readonly( + .def_prop_ro("stride", + [](tt::MetalLayoutAttr const &self, + std::vector logicalShape) { + auto stride = self.getStride(logicalShape); + return std::vector(stride.begin(), + stride.end()); + }) + .def_prop_ro("oobval", &tt::MetalLayoutAttr::getOobVal) + .def_prop_ro("oobval_as_int", + [](tt::MetalLayoutAttr la) { + return static_cast(la.getOobVal()); + }) + .def_prop_ro("grid_attr", &tt::MetalLayoutAttr::getGrid) + .def_prop_ro( "memref", [](tt::MetalLayoutAttr self) { return wrap(self.getMemref()); }) - .def_property_readonly("memory_space", - &tt::MetalLayoutAttr::getMemorySpace) - .def_property_readonly("memory_space_as_int", - [](tt::MetalLayoutAttr la) { - return static_cast( - la.getMemorySpace()); - }) - .def_property_readonly("shard_shape", &tt::MetalLayoutAttr::getShardShape) - .def_property_readonly("linear", [](tt::MetalLayoutAttr self) { + .def_prop_ro("memory_space", &tt::MetalLayoutAttr::getMemorySpace) + .def_prop_ro("memory_space_as_int", + [](tt::MetalLayoutAttr la) { + return static_cast(la.getMemorySpace()); + }) + .def_prop_ro("shard_shape", &tt::MetalLayoutAttr::getShardShape) + .def_prop_ro("linear", [](tt::MetalLayoutAttr self) { return wrap(self.getLinear()); }); @@ -115,8 +113,8 @@ void populateTTModule(py::module &m) { 
[](MlirContext ctx, std::vector shape) { return wrap(tt::GridAttr::get(unwrap(ctx), shape)); }) - .def_property_readonly( - "shape", [](tt::GridAttr const &ga) { return ga.getShape().vec(); }); + .def_prop_ro("shape", + [](tt::GridAttr const &ga) { return ga.getShape().vec(); }); tt_attribute_class(m, "ChipCapabilityAttr") .def_static( @@ -125,10 +123,9 @@ void populateTTModule(py::module &m) { return wrap(tt::ChipCapabilityAttr::get( unwrap(ctx), static_cast(chipCapability))); }) - .def_property_readonly("capability_as_int", - [](tt::ChipCapabilityAttr self) { - return static_cast(self.getValue()); - }); + .def_prop_ro("capability_as_int", [](tt::ChipCapabilityAttr self) { + return static_cast(self.getValue()); + }); tt_attribute_class(m, "ArchAttr") .def_static("get", @@ -136,7 +133,7 @@ void populateTTModule(py::module &m) { return wrap(tt::ArchAttr::get(unwrap(ctx), static_cast(arch))); }) - .def_property_readonly("arch_as_int", [](tt::ArchAttr self) { + .def_prop_ro("arch_as_int", [](tt::ArchAttr self) { return static_cast(self.getValue()); }); @@ -147,7 +144,7 @@ void populateTTModule(py::module &m) { return wrap(tt::DataTypeAttr::get( unwrap(ctx), static_cast(*supportedDataTypes))); }) - .def_property_readonly("data_type_as_int", [](tt::DataTypeAttr self) { + .def_prop_ro("data_type_as_int", [](tt::DataTypeAttr self) { return static_cast(self.getValue()); }); @@ -174,51 +171,47 @@ void populateTTModule(py::module &m) { mlir::cast(unwrap(supportedTileSizes)), numCBs)); }) - .def_property_readonly("usable_l1_size", - &tt::ChipDescAttr::getUsableL1Size) - .def_property_readonly("usable_dram_channel_size", - &tt::ChipDescAttr::getUsableDramChannelSize) - .def_property_readonly("arch", &tt::ChipDescAttr::getArch) - .def_property_readonly( - "grid", [](tt::ChipDescAttr self) { return self.getGrid().vec(); }) - .def_property_readonly("l1_size", &tt::ChipDescAttr::getL1Size) - .def_property_readonly("num_dram_channels", - &tt::ChipDescAttr::getNumDramChannels) - .def_property_readonly("dram_channel_size", - &tt::ChipDescAttr::getDramChannelSize) - .def_property_readonly("noc_l1_address_align_bytes", - &tt::ChipDescAttr::getNocL1AddressAlignBytes) - .def_property_readonly("pcie_address_align_bytes", - &tt::ChipDescAttr::getPcieAddressAlignBytes) - .def_property_readonly("noc_dram_address_align_bytes", - &tt::ChipDescAttr::getNocDRAMAddressAlignBytes) - .def_property_readonly("l1_unreserved_base", - &tt::ChipDescAttr::getL1UnreservedBase) - .def_property_readonly("erisc_l1_unreserved_base", - &tt::ChipDescAttr::getEriscL1UnreservedBase) - .def_property_readonly("dram_unreserved_base", - &tt::ChipDescAttr::getDramUnreservedBase) - .def_property_readonly("dram_unreserved_end", - &tt::ChipDescAttr::getDramUnreservedEnd) - .def_property_readonly("chip_physical_cores", - &tt::ChipDescAttr::getChipPhysicalCores) - .def_property_readonly("supported_data_types", - [](tt::ChipDescAttr self) { - return self.getSupportedDataTypes().vec(); - }) - .def_property_readonly("supported_tile_sizes", - [](tt::ChipDescAttr self) { - return self.getSupportedTileSizes().vec(); - }) - .def_property_readonly("num_cbs", &tt::ChipDescAttr::getNumCBs); + .def_prop_ro("usable_l1_size", &tt::ChipDescAttr::getUsableL1Size) + .def_prop_ro("usable_dram_channel_size", + &tt::ChipDescAttr::getUsableDramChannelSize) + .def_prop_ro("arch", &tt::ChipDescAttr::getArch) + .def_prop_ro("grid", + [](tt::ChipDescAttr self) { return self.getGrid().vec(); }) + .def_prop_ro("l1_size", &tt::ChipDescAttr::getL1Size) + 
.def_prop_ro("num_dram_channels", &tt::ChipDescAttr::getNumDramChannels) + .def_prop_ro("dram_channel_size", &tt::ChipDescAttr::getDramChannelSize) + .def_prop_ro("noc_l1_address_align_bytes", + &tt::ChipDescAttr::getNocL1AddressAlignBytes) + .def_prop_ro("pcie_address_align_bytes", + &tt::ChipDescAttr::getPcieAddressAlignBytes) + .def_prop_ro("noc_dram_address_align_bytes", + &tt::ChipDescAttr::getNocDRAMAddressAlignBytes) + .def_prop_ro("l1_unreserved_base", &tt::ChipDescAttr::getL1UnreservedBase) + .def_prop_ro("erisc_l1_unreserved_base", + &tt::ChipDescAttr::getEriscL1UnreservedBase) + .def_prop_ro("dram_unreserved_base", + &tt::ChipDescAttr::getDramUnreservedBase) + .def_prop_ro("dram_unreserved_end", + &tt::ChipDescAttr::getDramUnreservedEnd) + .def_prop_ro("chip_physical_cores", + &tt::ChipDescAttr::getChipPhysicalCores) + .def_prop_ro("supported_data_types", + [](tt::ChipDescAttr self) { + return self.getSupportedDataTypes().vec(); + }) + .def_prop_ro("supported_tile_sizes", + [](tt::ChipDescAttr self) { + return self.getSupportedTileSizes().vec(); + }) + .def_prop_ro("num_cbs", &tt::ChipDescAttr::getNumCBs); tt_attribute_class(m, "TileSizeAttr") .def_static("get", [](MlirContext ctx, int64_t y, int64_t x) { return wrap(tt::TileSizeAttr::get(unwrap(ctx), y, x)); }) - .def_property_readonly("y", &tt::TileSizeAttr::getY) - .def_property_readonly("x", &tt::TileSizeAttr::getX); + .def_prop_ro("y", &tt::TileSizeAttr::getY) + .def_prop_ro("x", &tt::TileSizeAttr::getX); tt_attribute_class(m, "ChipPhysicalCoresAttr") .def_static("get", @@ -229,27 +222,26 @@ void populateTTModule(py::module &m) { return wrap(tt::ChipPhysicalCoresAttr::get( unwrap(ctx), worker, dram, eth, eth_inactive)); }) - .def_property_readonly( + .def_prop_ro( "worker", [](tt::ChipPhysicalCoresAttr self) { return self.getWorker().vec(); }) - .def_property_readonly( + .def_prop_ro( "dram", [](tt::ChipPhysicalCoresAttr self) { return self.getDram().vec(); }) - .def_property_readonly( + .def_prop_ro( "eth", [](tt::ChipPhysicalCoresAttr self) { return self.getEth().vec(); }) - .def_property_readonly("eth_inactive", - [](tt::ChipPhysicalCoresAttr self) { - return self.getEthInactive().vec(); - }); + .def_prop_ro("eth_inactive", [](tt::ChipPhysicalCoresAttr self) { + return self.getEthInactive().vec(); + }); tt_attribute_class(m, "CoreCoordAttr") .def_static("get", [](MlirContext ctx, int64_t y, int64_t x) { return wrap(tt::CoreCoordAttr::get(unwrap(ctx), y, x)); }) - .def_property_readonly("y", &tt::CoreCoordAttr::getY) - .def_property_readonly("x", &tt::CoreCoordAttr::getX); + .def_prop_ro("y", &tt::CoreCoordAttr::getY) + .def_prop_ro("x", &tt::CoreCoordAttr::getX); tt_attribute_class(m, "ChipCoordAttr") .def_static("get", @@ -258,10 +250,10 @@ void populateTTModule(py::module &m) { return wrap( tt::ChipCoordAttr::get(unwrap(ctx), rack, shelf, y, x)); }) - .def_property_readonly("rack", &tt::ChipCoordAttr::getRack) - .def_property_readonly("shelf", &tt::ChipCoordAttr::getShelf) - .def_property_readonly("y", &tt::ChipCoordAttr::getY) - .def_property_readonly("x", &tt::ChipCoordAttr::getX); + .def_prop_ro("rack", &tt::ChipCoordAttr::getRack) + .def_prop_ro("shelf", &tt::ChipCoordAttr::getShelf) + .def_prop_ro("y", &tt::ChipCoordAttr::getY) + .def_prop_ro("x", &tt::ChipCoordAttr::getX); tt_attribute_class(m, "ChipChannelAttr") .def_static( @@ -273,16 +265,15 @@ void populateTTModule(py::module &m) { ethernetCoreCoord0, deviceId1, ethernetCoreCoord1)); }) - .def_property_readonly("device_id0", 
&tt::ChipChannelAttr::getDeviceId0) - .def_property_readonly("ethernet_core_coord0", - [](tt::ChipChannelAttr self) { - return self.getEthernetCoreCoord0().vec(); - }) - .def_property_readonly("device_id1", &tt::ChipChannelAttr::getDeviceId1) - .def_property_readonly("ethernet_core_coord1", - [](tt::ChipChannelAttr self) { - return self.getEthernetCoreCoord1().vec(); - }); + .def_prop_ro("device_id0", &tt::ChipChannelAttr::getDeviceId0) + .def_prop_ro("ethernet_core_coord0", + [](tt::ChipChannelAttr self) { + return self.getEthernetCoreCoord0().vec(); + }) + .def_prop_ro("device_id1", &tt::ChipChannelAttr::getDeviceId1) + .def_prop_ro("ethernet_core_coord1", [](tt::ChipChannelAttr self) { + return self.getEthernetCoreCoord1().vec(); + }); tt_attribute_class(m, "SystemDescAttr") .def_static("get_default", @@ -327,21 +318,21 @@ void populateTTModule(py::module &m) { chipDescIndices, chipCapabilitiesUnwrapped, chipCoordsUnwrapped, chipChannelsUnwrapped)); }) - .def_property_readonly( + .def_prop_ro( "chip_descs", [](tt::SystemDescAttr self) { return self.getChipDescs().vec(); }) - .def_property_readonly("chip_desc_indices", - [](tt::SystemDescAttr self) { - return self.getChipDescIndices().vec(); - }) - .def_property_readonly("chip_capabilities", - [](tt::SystemDescAttr self) { - return self.getChipCapabilities().vec(); - }) - .def_property_readonly( + .def_prop_ro("chip_desc_indices", + [](tt::SystemDescAttr self) { + return self.getChipDescIndices().vec(); + }) + .def_prop_ro("chip_capabilities", + [](tt::SystemDescAttr self) { + return self.getChipCapabilities().vec(); + }) + .def_prop_ro( "chip_coords", [](tt::SystemDescAttr self) { return self.getChipCoords().vec(); }) - .def_property_readonly("chip_channels", [](tt::SystemDescAttr self) { + .def_prop_ro("chip_channels", [](tt::SystemDescAttr self) { return self.getChipChannels().vec(); }); @@ -352,10 +343,9 @@ void populateTTModule(py::module &m) { return wrap(tt::MemorySpaceAttr::get( unwrap(ctx), static_cast(memorySpace))); }) - .def_property_readonly("memory_space_as_int", - [](tt::MemorySpaceAttr self) { - return static_cast(self.getValue()); - }); + .def_prop_ro("memory_space_as_int", [](tt::MemorySpaceAttr self) { + return static_cast(self.getValue()); + }); tt_attribute_class(m, "OOBValAttr") .def_static("get", @@ -363,7 +353,7 @@ void populateTTModule(py::module &m) { return wrap(tt::OOBValAttr::get( unwrap(ctx), static_cast(oobVal))); }) - .def_property_readonly("oob_val_as_int", [](tt::OOBValAttr self) { + .def_prop_ro("oob_val_as_int", [](tt::OOBValAttr self) { return static_cast(self.getValue()); }); @@ -374,10 +364,9 @@ void populateTTModule(py::module &m) { return wrap(tt::IteratorTypeAttr::get( unwrap(ctx), static_cast(iteratorType))); }) - .def_property_readonly("iterator_type_as_int", - [](tt::IteratorTypeAttr self) { - return static_cast(self.getValue()); - }); + .def_prop_ro("iterator_type_as_int", [](tt::IteratorTypeAttr self) { + return static_cast(self.getValue()); + }); tt_type_class(m, "DeviceType") .def_static( @@ -386,9 +375,8 @@ void populateTTModule(py::module &m) { return wrap(tt::DeviceType::get( unwrap(ctx), mlir::cast(unwrap(deviceAttr)))); }) - .def_property_readonly("device_attr", [](tt::DeviceType const &self) { - return self.getDesc(); - }); + .def_prop_ro("device_attr", + [](tt::DeviceType const &self) { return self.getDesc(); }); tt_attribute_class(m, "DeviceAttr") .def_static("from_system_desc", @@ -414,16 +402,15 @@ void populateTTModule(py::module &m) { [](MlirAttribute const &self) { return 
mlir::cast(unwrap(self)); }) - .def_property_readonly("grid_attr", &tt::DeviceAttr::getWorkerGrid) - .def_property_readonly( - "l1_map", [](tt::DeviceAttr self) { return wrap(self.getL1Map()); }) - .def_property_readonly( - "dram_map", - [](tt::DeviceAttr self) { return wrap(self.getDramMap()); }) - .def_property_readonly( + .def_prop_ro("grid_attr", &tt::DeviceAttr::getWorkerGrid) + .def_prop_ro("l1_map", + [](tt::DeviceAttr self) { return wrap(self.getL1Map()); }) + .def_prop_ro("dram_map", + [](tt::DeviceAttr self) { return wrap(self.getDramMap()); }) + .def_prop_ro( "mesh_shape", [](tt::DeviceAttr const &self) { return self.getMeshShape().vec(); }) - .def_property_readonly("chip_ids", [](tt::DeviceAttr const &self) { + .def_prop_ro("chip_ids", [](tt::DeviceAttr const &self) { return self.getChipIds().vec(); }); @@ -435,11 +422,11 @@ void populateTTModule(py::module &m) { unwrap(ctx), SmallVector{height, width}, static_cast(dataType))); }) - .def_property_readonly("data_type_as_int", - [](tt::TileType self) { - return static_cast(self.getDataType()); - }) - .def_property_readonly("shape", [](tt::TileType const &tile) { + .def_prop_ro("data_type_as_int", + [](tt::TileType self) { + return static_cast(self.getDataType()); + }) + .def_prop_ro("shape", [](tt::TileType const &tile) { return std::vector({tile.getHeight(), tile.getWidth()}); }); } diff --git a/python/TTNNModule.cpp b/python/TTNNModule.cpp index 3610c1d211..83ddac1001 100644 --- a/python/TTNNModule.cpp +++ b/python/TTNNModule.cpp @@ -6,7 +6,7 @@ #include "ttmlir/Bindings/Python/TTMLIRModule.h" namespace mlir::ttmlir::python { -void populateTTNNModule(py::module &m) { +void populateTTNNModule(nb::module_ &m) { tt_attribute_class(m, "CoreRangeAttr") .def_static("get", @@ -27,12 +27,12 @@ void populateTTNNModule(py::module &m) { unwrap(ctx), mlir::cast(unwrap(grid)), offsetVec)); }, - py::arg("ctx"), py::arg("grid"), - py::arg("offset") = std::vector{0, 0}) - .def_property_readonly( + nb::arg("ctx"), nb::arg("grid"), + nb::arg("offset") = std::vector{0, 0}) + .def_prop_ro( "offset", [](tt::ttnn::CoreRangeAttr self) { return self.getOffset().vec(); }) - .def_property_readonly("size", [](tt::ttnn::CoreRangeAttr self) { + .def_prop_ro("size", [](tt::ttnn::CoreRangeAttr self) { return self.getSize().vec(); }); @@ -42,7 +42,7 @@ void populateTTNNModule(py::module &m) { return wrap(tt::ttnn::LayoutAttr::get( unwrap(ctx), static_cast(layout))); }) - .def_property_readonly("value", [](tt::ttnn::LayoutAttr self) { + .def_prop_ro("value", [](tt::ttnn::LayoutAttr self) { return static_cast(self.getValue()); }); @@ -54,10 +54,9 @@ void populateTTNNModule(py::module &m) { unwrap(ctx), static_cast( tensorMemoryLayout))); }) - .def_property_readonly("value", - [](tt::ttnn::TensorMemoryLayoutAttr self) { - return static_cast(self.getValue()); - }); + .def_prop_ro("value", [](tt::ttnn::TensorMemoryLayoutAttr self) { + return static_cast(self.getValue()); + }); tt_attribute_class(m, "BufferTypeAttr") .def_static( "get", @@ -65,7 +64,7 @@ void populateTTNNModule(py::module &m) { return wrap(tt::ttnn::BufferTypeAttr::get( unwrap(ctx), static_cast(bufferType))); }) - .def_property_readonly("value", [](tt::ttnn::BufferTypeAttr self) { + .def_prop_ro("value", [](tt::ttnn::BufferTypeAttr self) { return static_cast(self.getValue()); }); @@ -75,8 +74,7 @@ void populateTTNNModule(py::module &m) { return wrap( tt::ttnn::ShardSpecAttr::get(unwrap(ctx), shardShape)); }) - .def_property_readonly("shard_shape", - &tt::ttnn::ShardSpecAttr::getShardShape); + 
.def_prop_ro("shard_shape", &tt::ttnn::ShardSpecAttr::getShardShape); tt_attribute_class(m, "MemoryConfigAttr") .def_static("get", @@ -106,19 +104,17 @@ void populateTTNNModule(py::module &m) { tt::ttnn::ShapeAttr::get(unwrap(ctx), shardShape)), layoutAttr)); }) - .def_property_readonly("tensor_memory_layout", - &tt::ttnn::MemoryConfigAttr::getTensorMemoryLayout) - .def_property_readonly("buffer_type", - &tt::ttnn::MemoryConfigAttr::getBufferType) - .def_property_readonly("shard_spec", - &tt::ttnn::MemoryConfigAttr::getShardSpec); + .def_prop_ro("tensor_memory_layout", + &tt::ttnn::MemoryConfigAttr::getTensorMemoryLayout) + .def_prop_ro("buffer_type", &tt::ttnn::MemoryConfigAttr::getBufferType) + .def_prop_ro("shard_spec", &tt::ttnn::MemoryConfigAttr::getShardSpec); tt_attribute_class(m, "ShapeAttr") .def_static("get", [](MlirContext ctx, std::vector shape) { return wrap(tt::ttnn::ShapeAttr::get(unwrap(ctx), shape)); }) - .def_property_readonly("shape", [](tt::ttnn::ShapeAttr self) { + .def_prop_ro("shape", [](tt::ttnn::ShapeAttr self) { return std::vector(self.getShape().begin(), self.getShape().end()); }); @@ -129,8 +125,8 @@ void populateTTNNModule(py::module &m) { return wrap( tt::ttnn::MeshShapeAttr::get(unwrap(ctx), y, x)); }) - .def_property_readonly("y", &tt::ttnn::MeshShapeAttr::getY) - .def_property_readonly("x", &tt::ttnn::MeshShapeAttr::getX); + .def_prop_ro("y", &tt::ttnn::MeshShapeAttr::getY) + .def_prop_ro("x", &tt::ttnn::MeshShapeAttr::getX); tt_attribute_class(m, "TTNNLayoutAttr") .def_static( @@ -149,29 +145,28 @@ void populateTTNNModule(py::module &m) { mlir::cast(unwrap(grid)), mlir::cast(unwrap(memref)), memLayoutAttr)); }) - .def_property_readonly( + .def_prop_ro( "linear", [](tt::ttnn::TTNNLayoutAttr self) { return wrap(self.getLinear()); }) - .def_property_readonly("grid_attr", &tt::ttnn::TTNNLayoutAttr::getGrid) - .def_property_readonly( + .def_prop_ro("grid_attr", &tt::ttnn::TTNNLayoutAttr::getGrid) + .def_prop_ro( "memref", [](tt::ttnn::TTNNLayoutAttr self) { return wrap(self.getMemref()); }) - .def_property_readonly("tensor_memory_layout_as_int", - [](tt::ttnn::TTNNLayoutAttr self) - -> std::variant { - if (!self.getMemLayout()) { - return py::none(); - } - return static_cast( - self.getMemLayout().getValue()); - }) - .def_property_readonly("memory_layout_as_int", - [](tt::ttnn::TTNNLayoutAttr self) { - return static_cast(self.getLayout()); - }) - .def_property_readonly("data_type_as_int", - [](tt::ttnn::TTNNLayoutAttr self) { - return static_cast(self.getDataType()); - }); + .def_prop_ro("tensor_memory_layout_as_int", + [](tt::ttnn::TTNNLayoutAttr self) + -> std::variant { + if (!self.getMemLayout()) { + return nb::none(); + } + return static_cast( + self.getMemLayout().getValue()); + }) + .def_prop_ro("memory_layout_as_int", + [](tt::ttnn::TTNNLayoutAttr self) { + return static_cast(self.getLayout()); + }) + .def_prop_ro("data_type_as_int", [](tt::ttnn::TTNNLayoutAttr self) { + return static_cast(self.getDataType()); + }); } } // namespace mlir::ttmlir::python diff --git a/python/Util.cpp b/python/Util.cpp index b8bf220de1..099714ceea 100644 --- a/python/Util.cpp +++ b/python/Util.cpp @@ -3,12 +3,11 @@ // SPDX-License-Identifier: Apache-2.0 #include "ttmlir/Bindings/Python/TTMLIRModule.h" -#include #include namespace mlir::ttmlir::python { -void populateUtilModule(py::module &m) { +void populateUtilModule(nb::module_ &m) { m.def("debug_print_module", [](MlirModule module) { std::string source; llvm::raw_string_ostream os(source); @@ -20,17 +19,17 @@ void 
populateUtilModule(py::module &m) { }); m.def("get_loc_name", - [](MlirLocation _loc) -> std::variant { + [](MlirLocation _loc) -> std::variant { mlir::Location loc = unwrap(_loc); if (mlir::isa(loc)) { mlir::NameLoc nameLoc = mlir::cast(loc); return nameLoc.getName().str(); } - return py::none(); + return nb::none(); }); m.def("get_loc_full", - [](MlirLocation _loc) -> std::variant { + [](MlirLocation _loc) -> std::variant { mlir::Location loc = unwrap(_loc); std::string locationStr; From feca8d7c96e12662669f17455528dac078441124 Mon Sep 17 00:00:00 2001 From: vprajapati-tt Date: Wed, 5 Mar 2025 20:13:13 +0000 Subject: [PATCH 2/4] Explorer + TTIR-Builder Tested Changes --- .../ttmlir/Target/Utils/MLIRToFlatbuffer.h | 3 ++ python/Passes.cpp | 10 ++++- python/Util.cpp | 39 +++++++++---------- 3 files changed, 31 insertions(+), 21 deletions(-) diff --git a/include/ttmlir/Target/Utils/MLIRToFlatbuffer.h b/include/ttmlir/Target/Utils/MLIRToFlatbuffer.h index decc6e01e4..460aa149e0 100644 --- a/include/ttmlir/Target/Utils/MLIRToFlatbuffer.h +++ b/include/ttmlir/Target/Utils/MLIRToFlatbuffer.h @@ -31,6 +31,9 @@ struct GoldenTensor { std::vector &&_data) : name(name), shape(shape), strides(strides), dtype(dtype), data(std::move(_data)) {} + + // Create an explicit empty constructor + GoldenTensor() {} }; inline ::tt::target::OOBVal toFlatbuffer(FlatbufferObjectCache &, diff --git a/python/Passes.cpp b/python/Passes.cpp index a536494940..e11d3f873b 100644 --- a/python/Passes.cpp +++ b/python/Passes.cpp @@ -10,11 +10,14 @@ #include "ttmlir/Target/TTMetal/TTMetalToFlatbuffer.h" #include "ttmlir/Target/TTNN/TTNNToFlatbuffer.h" #include +#include #include #include // Make Opaque so Casts & Copies don't occur NB_MAKE_OPAQUE(std::vector>); +NB_MAKE_OPAQUE(mlir::tt::GoldenTensor); +NB_MAKE_OPAQUE(std::unordered_map); namespace mlir::tt::ttnn { void registerTTNNToFlatbuffer(); @@ -155,6 +158,9 @@ void populatePassesModule(nb::module_ &m) { nb::bind_vector>>( m, "ModuleLog"); + nb::bind_map>( + m, "GoldenMap"); + m.def( "ttnn_to_flatbuffer_file", [](MlirModule module, std::string &filepath, @@ -178,7 +184,9 @@ void populatePassesModule(nb::module_ &m) { filepath); } }, - nb::arg("module"), nb::arg("filepath"), nb::arg("goldenMap") = nb::dict(), + nb::arg("module"), nb::arg("filepath"), + nb::arg("goldenMap") = + std::unordered_map(), nb::arg("moduleCache") = std::vector>()); diff --git a/python/Util.cpp b/python/Util.cpp index 099714ceea..7fe690e909 100644 --- a/python/Util.cpp +++ b/python/Util.cpp @@ -18,26 +18,25 @@ void populateUtilModule(nb::module_ &m) { return source; }); - m.def("get_loc_name", - [](MlirLocation _loc) -> std::variant { - mlir::Location loc = unwrap(_loc); - if (mlir::isa(loc)) { - mlir::NameLoc nameLoc = mlir::cast(loc); - return nameLoc.getName().str(); - } - return nb::none(); - }); - - m.def("get_loc_full", - [](MlirLocation _loc) -> std::variant { - mlir::Location loc = unwrap(_loc); - - std::string locationStr; - llvm::raw_string_ostream output(locationStr); - loc.print(output); - - return locationStr; - }); + m.def("get_loc_name", [](MlirLocation _loc) -> nb::object { + mlir::Location loc = unwrap(_loc); + if (mlir::isa(loc)) { + mlir::NameLoc nameLoc = mlir::cast(loc); + return nb::str(nameLoc.getName().str().c_str()); + } + return nb::none(); + }); + + m.def("get_loc_full", [](MlirLocation _loc) { + mlir::Location loc = unwrap(_loc); + + std::string locationStr; + llvm::raw_string_ostream output(locationStr); + loc.print(output); + output.flush(); + + return locationStr; + 
});
 }
 
 } // namespace mlir::ttmlir::python

From 89dc29a5e1ffe52dbfe0f20f7cfb1658102a5fd1 Mon Sep 17 00:00:00 2001
From: vprajapati-tt
Date: Wed, 5 Mar 2025 20:28:32 +0000
Subject: [PATCH 3/4] Included required Variant Header

---
 include/ttmlir/Bindings/Python/TTMLIRModule.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/ttmlir/Bindings/Python/TTMLIRModule.h b/include/ttmlir/Bindings/Python/TTMLIRModule.h
index a01ae0dc84..5e6692a93e 100644
--- a/include/ttmlir/Bindings/Python/TTMLIRModule.h
+++ b/include/ttmlir/Bindings/Python/TTMLIRModule.h
@@ -22,6 +22,7 @@
 #include "ttmlir/RegisterAll.h"
 #include "llvm/Support/CommandLine.h"
 
+#include <variant>
 #include <vector>
 
 namespace nb = nanobind;

From eb1b060a64c8794cc4f83ff603bc297e0ac69319 Mon Sep 17 00:00:00 2001
From: vprajapati-tt
Date: Thu, 6 Mar 2025 16:27:59 +0000
Subject: [PATCH 4/4] nit: default constructor

---
 include/ttmlir/Target/Utils/MLIRToFlatbuffer.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/ttmlir/Target/Utils/MLIRToFlatbuffer.h b/include/ttmlir/Target/Utils/MLIRToFlatbuffer.h
index 460aa149e0..653196b38f 100644
--- a/include/ttmlir/Target/Utils/MLIRToFlatbuffer.h
+++ b/include/ttmlir/Target/Utils/MLIRToFlatbuffer.h
@@ -33,7 +33,7 @@ struct GoldenTensor {
         data(std::move(_data)) {}
 
   // Create an explicit empty constructor
-  GoldenTensor() {}
+  GoldenTensor() = default;
 };
 
 inline ::tt::target::OOBVal toFlatbuffer(FlatbufferObjectCache &,
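
The recurring renames in the patches above map pybind11 binding calls onto their nanobind equivalents: PYBIND11_MODULE becomes NB_MODULE, py::module becomes nb::module_, def_readwrite becomes def_rw, def_property and def_property_readonly become def_prop_rw and def_prop_ro, and py::init/py::arg become nb::init/nb::arg. The standalone sketch below is not part of the patch series; it is only a minimal illustration of that mapping against the public nanobind API, and the names example_ext, Params, and make_params are hypothetical.

#include <nanobind/nanobind.h>
#include <nanobind/stl/string.h>
#include <nanobind/stl/vector.h>

#include <cstdint>
#include <string>
#include <vector>

namespace nb = nanobind;

// Hypothetical value type standing in for the params/attr classes bound above.
struct Params {
  std::string name;
  std::vector<int64_t> grid;
};

NB_MODULE(example_ext, m) {                 // pybind11: PYBIND11_MODULE
  nb::class_<Params>(m, "Params")
      .def(nb::init<>())                    // pybind11: py::init<>()
      .def_rw("name", &Params::name)        // pybind11: def_readwrite
      .def_prop_rw(                         // pybind11: def_property
          "grid",
          [](const Params &p) { return p.grid; },
          [](Params &p, const std::vector<int64_t> &g) { p.grid = g; })
      .def_prop_ro(                         // pybind11: def_property_readonly
          "grid_rank",
          [](const Params &p) { return p.grid.size(); });

  m.def(
      "make_params",
      [](std::string name) {
        Params p;
        p.name = std::move(name);
        return p;
      },
      nb::arg("name") = "default");         // pybind11: py::arg
}

Consistent with the MLIRModuleLogger change in Passes.cpp, no shared_ptr holder type is declared here: nanobind's default ownership lets Python own the instances it constructs.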